diff --git a/go.mod b/go.mod
index 05618bc71..93d5d846f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,35 +1,38 @@
 module github.com/openshift-pipelines/opc

 go 1.22.5
+toolchain go1.22.9

 require (
 github.com/openshift-pipelines/manual-approval-gate v0.4.0
 github.com/openshift-pipelines/pipelines-as-code v0.29.0
 github.com/spf13/cobra v1.8.1
- github.com/tektoncd/cli v0.38.1
+ github.com/tektoncd/cli v0.39.0
 github.com/tektoncd/results v0.13.0
 )

 replace k8s.io/client-go => k8s.io/client-go v0.31.0

 require (
- cloud.google.com/go v0.115.1 // indirect
- cloud.google.com/go/auth v0.9.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
- cloud.google.com/go/firestore v1.16.0 // indirect
- cloud.google.com/go/iam v1.1.13 // indirect
- cloud.google.com/go/kms v1.18.5 // indirect
- cloud.google.com/go/longrunning v0.5.12 // indirect
- cloud.google.com/go/storage v1.43.0 // indirect
+ cel.dev/expr v0.16.1 // indirect
+ cloud.google.com/go v0.116.0 // indirect
+ cloud.google.com/go/auth v0.10.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
+ cloud.google.com/go/compute/metadata v0.5.2 // indirect
+ cloud.google.com/go/firestore v1.17.0 // indirect
+ cloud.google.com/go/iam v1.2.1 // indirect
+ cloud.google.com/go/kms v1.20.0 // indirect
+ cloud.google.com/go/longrunning v0.6.1 // indirect
+ cloud.google.com/go/monitoring v1.21.1 // indirect
+ cloud.google.com/go/storage v1.46.0 // indirect
 contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
 contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
 filippo.io/edwards25519 v1.1.0 // indirect
 github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
- github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
+ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect
@@ -43,7 +46,10 @@ require (
 github.com/Azure/go-autorest/logger v0.2.1 // indirect
 github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
- github.com/IBM/sarama v1.43.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
+ github.com/IBM/sarama v1.43.3 // indirect
 github.com/Microsoft/go-winio v0.6.2 // indirect
 github.com/ProtonMail/go-crypto v1.0.0 // indirect
 github.com/PuerkitoBio/goquery v1.9.2 // indirect
@@ -58,7 +64,7 @@ require (
 github.com/alibabacloud-go/tea v1.2.1 // indirect
 github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
 github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
- github.com/aliyun/credentials-go v1.3.1 // indirect
+ github.com/aliyun/credentials-go v1.3.2 // indirect
 github.com/andybalholm/cascadia v1.3.2 // indirect
 github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -78,7 +84,7 @@ require (
 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 // indirect
 github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3 // indirect
 github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 // indirect
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 // indirect
@@ -89,12 +95,13 @@ require (
 github.com/blang/semver v3.5.1+incompatible // indirect
 github.com/blendle/zapdriver v1.3.1 // indirect
 github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 // indirect
- github.com/cenkalti/backoff/v3 v3.2.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
 github.com/cespare/xxhash/v2 v2.3.0 // indirect
 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
 github.com/clbanning/mxj/v2 v2.7.0 // indirect
 github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
 github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
 github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 github.com/coreos/go-oidc/v3 v3.11.0 // indirect
@@ -103,20 +110,22 @@ require (
 github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
 github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
 github.com/dimchansky/utfbom v1.1.1 // indirect
- github.com/docker/cli v27.1.2+incompatible // indirect
+ github.com/docker/cli v27.3.1+incompatible // indirect
 github.com/docker/distribution v2.8.3+incompatible // indirect
- github.com/docker/docker v27.1.2+incompatible // indirect
+ github.com/docker/docker v27.3.1+incompatible // indirect
 github.com/docker/docker-credential-helpers v0.8.0 // indirect
 github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/eapache/go-resiliency v1.6.0 // indirect
+ github.com/eapache/go-resiliency v1.7.0 // indirect
 github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
 github.com/eapache/queue v1.1.0 // indirect
 github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+ github.com/envoyproxy/go-control-plane v0.13.0 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
 github.com/evanphx/json-patch v5.9.0+incompatible // indirect
 github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 github.com/fatih/color v1.18.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.8.0 // indirect
 github.com/fvbommel/sortorder v1.1.0 // indirect
 github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 github.com/gdamore/encoding v1.0.0 // indirect
@@ -125,7 +134,7 @@ require (
 github.com/go-chi/chi/v5 v5.1.0 // indirect
 github.com/go-errors/errors v1.4.2 // indirect
 github.com/go-jose/go-jose/v3 v3.0.3 // indirect
- github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.4 // indirect
 github.com/go-kit/log v0.2.1 // indirect
 github.com/go-logfmt/logfmt v0.6.0 // indirect
 github.com/go-logr/logr v1.4.2 // indirect
@@ -165,7 +174,7 @@ require (
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 github.com/google/uuid v1.6.0 // indirect
 github.com/google/wire v0.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
 github.com/googleapis/gax-go/v2 v2.13.0 // indirect
 github.com/gorilla/websocket v1.5.3 // indirect
 github.com/grafeas/grafeas v0.2.3 // indirect
@@ -182,9 +191,9 @@ require (
 github.com/hashicorp/go-sockaddr v1.0.5 // indirect
 github.com/hashicorp/go-uuid v1.0.3 // indirect
 github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
- github.com/hashicorp/vault/api v1.14.0 // indirect
+ github.com/hashicorp/vault/api v1.15.0 // indirect
 github.com/imdario/mergo v0.3.16 // indirect
- github.com/in-toto/attestation v1.0.1 // indirect
+ github.com/in-toto/attestation v1.1.0 // indirect
 github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
 github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -193,7 +202,7 @@ require (
 github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
 github.com/jcmturner/rpc/v2 v2.0.3 // indirect
 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
- github.com/jellydator/ttlcache/v3 v3.2.0 // indirect
+ github.com/jellydator/ttlcache/v3 v3.3.0 // indirect
 github.com/jmespath/go-jmespath v0.4.0 // indirect
 github.com/joho/godotenv v1.5.1 // indirect
 github.com/jonboulle/clockwork v0.4.0 // indirect
@@ -225,7 +234,7 @@ require (
 github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 github.com/montanaflynn/stats v0.7.1 // indirect
- github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
+ github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
@@ -236,11 +245,12 @@ require (
 github.com/openshift/api v0.0.0-20240422085825-2624175e9673 // indirect
 github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd // indirect
 github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 github.com/pierrec/lz4/v4 v4.1.21 // indirect
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 github.com/pkg/errors v0.9.1 // indirect
+ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 github.com/prometheus/client_golang v1.20.5 // indirect
 github.com/prometheus/client_model v0.6.1 // indirect
 github.com/prometheus/common v0.58.0 // indirect
@@ -255,15 +265,15 @@ require (
 github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 github.com/segmentio/ksuid v1.0.4 // indirect
 github.com/shibumi/go-pathspec v1.3.0 // indirect
- github.com/sigstore/cosign/v2 v2.4.0 // indirect
- github.com/sigstore/fulcio v1.5.1 // indirect
+ github.com/sigstore/cosign/v2 v2.4.1 // indirect
+ github.com/sigstore/fulcio v1.6.3 // indirect
 github.com/sigstore/protobuf-specs v0.3.2 // indirect
 github.com/sigstore/rekor v1.3.6 // indirect
- github.com/sigstore/sigstore v1.8.8 // indirect
- github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 // indirect
- github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 // indirect
- github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 // indirect
- github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 // indirect
+ github.com/sigstore/sigstore v1.8.10 // indirect
+ github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 // indirect
+ github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 // indirect
+ github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 // indirect
+ github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 // indirect
 github.com/sigstore/timestamp-authority v1.2.2 // indirect
 github.com/sirupsen/logrus v1.9.3 // indirect
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
@@ -272,14 +282,14 @@ require (
 github.com/spf13/cast v1.6.0 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
 github.com/spf13/viper v1.19.0 // indirect
- github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect
 github.com/stoewer/go-strcase v1.3.0 // indirect
 github.com/subosito/gotenv v1.6.0 // indirect
 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
- github.com/tektoncd/chains v0.22.0 // indirect
- github.com/tektoncd/hub v1.18.0 // indirect
- github.com/tektoncd/pipeline v0.65.0 // indirect
- github.com/tektoncd/triggers v0.29.1 // indirect
+ github.com/tektoncd/chains v0.23.0 // indirect
+ github.com/tektoncd/hub v1.19.0 // indirect
+ github.com/tektoncd/pipeline v0.65.2 // indirect
+ github.com/tektoncd/triggers v0.30.0 // indirect
 github.com/thales-e-security/pool v0.0.2 // indirect
 github.com/theupdateframework/go-tuf v0.7.0 // indirect
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -287,55 +297,59 @@ require (
 github.com/transparency-dev/merkle v0.0.2 // indirect
 github.com/vbatts/tar-split v0.11.5 // indirect
 github.com/x448/float16 v0.8.4 // indirect
- github.com/xanzy/go-gitlab v0.108.0 // indirect
+ github.com/xanzy/go-gitlab v0.109.0 // indirect
 github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 github.com/xdg-go/scram v1.1.2 // indirect
 github.com/xdg-go/stringprep v1.0.4 // indirect
 github.com/xlab/treeprint v1.2.0 // indirect
 github.com/xlzd/gotp v0.1.0 // indirect
- github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 // indirect
+ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
 github.com/zeebo/errs v1.3.0 // indirect
- go.mongodb.org/mongo-driver v1.15.0 // indirect
+ go.mongodb.org/mongo-driver v1.16.1 // indirect
 go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
- go.opentelemetry.io/otel v1.28.0 // indirect
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
- go.opentelemetry.io/otel/trace v1.28.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+ go.opentelemetry.io/otel v1.29.0 // indirect
+ go.opentelemetry.io/otel/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.29.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
+ go.opentelemetry.io/otel/trace v1.29.0 // indirect
 go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
- go.step.sm/crypto v0.51.1 // indirect
+ go.step.sm/crypto v0.51.2 // indirect
 go.uber.org/multierr v1.11.0 // indirect
 go.uber.org/zap v1.27.0 // indirect
- goa.design/goa/v3 v3.18.2 // indirect
+ goa.design/goa/v3 v3.19.1 // indirect
 gocloud.dev v0.40.0 // indirect
- gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 // indirect
- gocloud.dev/pubsub/kafkapubsub v0.37.0 // indirect
- golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect
- golang.org/x/mod v0.20.0 // indirect
- golang.org/x/net v0.28.0 // indirect
- golang.org/x/oauth2 v0.22.0 // indirect
+ gocloud.dev/docstore/mongodocstore v0.40.0 // indirect
+ gocloud.dev/pubsub/kafkapubsub v0.40.0 // indirect
+ golang.org/x/crypto v0.28.0 // indirect
+ golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect
+ golang.org/x/mod v0.21.0 // indirect
+ golang.org/x/net v0.30.0 // indirect
+ golang.org/x/oauth2 v0.23.0 // indirect
 golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.25.0 // indirect
- golang.org/x/term v0.23.0 // indirect
- golang.org/x/text v0.17.0 // indirect
- golang.org/x/time v0.6.0 // indirect
+ golang.org/x/sys v0.27.0 // indirect
+ golang.org/x/term v0.26.0 // indirect
+ golang.org/x/text v0.19.0 // indirect
+ golang.org/x/time v0.7.0 // indirect
 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
 gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/api v0.195.0 // indirect
- google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/grpc v1.67.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/api v0.203.0 // indirect
+ google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
+ google.golang.org/grpc v1.67.1 // indirect
+ google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
 gopkg.in/ini.v1 v1.67.0 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/api v0.31.0 // indirect
+ k8s.io/api v0.31.3 // indirect
 k8s.io/apiextensions-apiserver v0.31.0 // indirect
- k8s.io/apimachinery v0.31.0 // indirect
- k8s.io/cli-runtime v0.29.8 // indirect
+ k8s.io/apimachinery v0.31.3 // indirect
+ k8s.io/cli-runtime v0.29.11 // indirect
 k8s.io/client-go v1.5.2 // indirect
 k8s.io/klog/v2 v2.130.1 // indirect
 k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect
diff --git a/go.sum b/go.sum
index 3a372fe63..114a5251b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g=
+cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -13,12 +15,12 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.1 h1:+pMtLEV2k0AXKvs/tGZojuj6QaioxfUjOpMsG5Gtx+w= -cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -26,31 +28,37 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.16.0 h1:YwmDHcyrxVRErWcgxunzEaZxtNbc8QoFYA/JOEwDPgc= -cloud.google.com/go/firestore v1.16.0/go.mod h1:+22v/7p+WNBSQwdSwP57vz47aZiY+HrDkrOsJNhk7rg= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/kms v1.18.5 h1:75LSlVs60hyHK3ubs2OHd4sE63OAMcM2BdSJc2bkuM4= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= -cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= +cloud.google.com/go/firestore v1.17.0 h1:iEd1LBbkDZTFsLw3sTH50eyg4qe8eoG6CjocmEXO9aQ= +cloud.google.com/go/firestore v1.17.0/go.mod h1:69uPx1papBsY8ZETooc71fOhoKkD70Q1DwMrtKuOT/Y= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod 
h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= +cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= -cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= +cloud.google.com/go/pubsub v1.44.0 h1:pLaMJVDTlnUDIKT5L0k53YyLszfBbGoUBo/IqDK/fEI= +cloud.google.com/go/pubsub v1.44.0/go.mod h1:BD4a/kmE8OePyHoa1qAHEw1rMzXX+Pc8Se54T/8mc3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +cloud.google.com/go/storage v1.46.0 h1:OTXISBpFd8KaA2ClT3K3oRk8UGOcTHtrZ1bW88xKiic= +cloud.google.com/go/storage v1.46.0/go.mod h1:lM+gMAW91EfXIeMTBmixRsKL/XCxysytoAgduVikjMk= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= @@ -68,14 +76,16 @@ github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjq github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= 
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 h1:eXzkOEXbSTOa7cJ7EqeCVi/OFi/ppDrUtQuttCWy74c= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= @@ -107,12 +117,22 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= -github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= @@ -175,8 +195,8 @@ github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCE github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= -github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28= -github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= +github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= +github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -224,8 +244,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 h1:tHxQi/XHP github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 h1:E5ZAVOmI2apR8ADb72Q63KqwwwdW1XcMeXIlrZ1Psjg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4/go.mod h1:wezzqVUOVVdk+2Z/JzQT4NxAU0NbhRe5W8pIE72jsWI= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 h1:UPTdlTOwWUX49fVi7cymEN6hDqCwe3LNv1vi7TXUutk= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.3/go.mod h1:gjDP16zn+WWalyaUqwCCioQ8gU8lzttCCc9jYsiQI/8= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3 h1:neNOYJl72bHrz9ikAEED4VqWyND/Po0DnEx64RW6YM4= github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3/go.mod h1:TMhLIyRIyoGVlaEMAt+ITMbwskSTpcGsCPDq91/ihY0= github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 h1:HJwZwRt2Z2Tdec+m+fPjvdmkq2s9Ra+VR0hjF7V2o40= @@ -250,17 +270,17 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= 
github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 h1:R9d0v+iobRHSaE4wKUnXFiZp53AL4ED5MzgEMwGTZag= github.com/bradleyfalzon/ghinstallation/v2 v2.11.0/go.mod h1:0LWKQwOHewXO/1acI6TtyE0Xc4ObDb2rFN7eHBAG71M= -github.com/buildkite/agent/v3 v3.76.2 h1:SweFq3e0N20RikWsVeOXzTjfr0AoOskxm9c0bcNyI0E= -github.com/buildkite/agent/v3 v3.76.2/go.mod h1:9ffbmJD7d7C/nOcElj6Qm+uIj1QoYh3NNvka4rkKkss= -github.com/buildkite/go-pipeline v0.10.0 h1:EDffu+LfMY2k5u+iEdo6Jn3obGKsrL5wicc1O/yFeRs= -github.com/buildkite/go-pipeline v0.10.0/go.mod h1:eMH1kiav5VeiTiu0Mk2/M7nZhKyFeL4iGj7Y7rj4f3w= +github.com/buildkite/agent/v3 v3.81.0 h1:JVfkng2XnsXesFXwiFwLJFkuzVu4zvoJCvedfoIXD6E= +github.com/buildkite/agent/v3 v3.81.0/go.mod h1:edJeyycODRxaFvpT22rDGwaQ5oa4eB8GjtbjgX5VpFw= +github.com/buildkite/go-pipeline v0.13.1 h1:Y9p8pQIwPtauVwNrcmTDH6+XK7jE1nLuvWVaK8oymA8= +github.com/buildkite/go-pipeline v0.13.1/go.mod h1:2HHqlSFTYgHFhzedJu0LhLs9n5c9XkYnHiQFVN5HE4U= github.com/buildkite/interpolate v0.1.3 h1:OFEhqji1rNTRg0u9DsSodg63sjJQEb1uWbENq9fUOBM= github.com/buildkite/interpolate v0.1.3/go.mod h1:UNVe6A+UfiBNKbhAySrBbZFZFxQ+DXr9nWen6WVt/A8= github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ= github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -283,6 +303,8 @@ github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUK github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= @@ -296,8 +318,8 @@ github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDh github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/creack/pty v1.1.24 
h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= @@ -308,6 +330,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= @@ -315,18 +339,18 @@ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1G github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v27.1.2+incompatible h1:nYviRv5Y+YAKx3dFrTvS1ErkyVVunKOhoweCTE1BsnI= -github.com/docker/cli v27.1.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY= -github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 
h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= @@ -338,7 +362,11 @@ github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= @@ -355,8 +383,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -379,8 +407,8 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= -github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -554,8 +582,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= @@ -610,8 +638,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= -github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82 h1:uf1FmugJNeFovjWtxD7FSPWQXdi0KuKnZfvN4CFUAtA= github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= @@ -622,8 +650,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/in-toto/attestation v1.0.1 h1:DgX1XuBkryTpj1Piq8AiMK3CMfEcec3Qv6+Ku+uI3WY= -github.com/in-toto/attestation v1.0.1/go.mod h1:hCR5COCuENh5+VfojEkJnt7caOymbEgvyZdKifD6pOw= +github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= +github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 h1:cwCITdi9pF50CF8uh40qDbkJ/VrEVzx5AoaHP7OPdEo= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09/go.mod h1:yGCBn2JKF1m26FX8GmkcLSOFVjB6khWRxFsHwWIg7hw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -642,8 +670,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod 
h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= -github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -675,6 +703,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= @@ -760,8 +790,8 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= -github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= +github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= +github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -779,8 +809,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= -github.com/oleiade/reflections v1.0.1/go.mod 
h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= +github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -795,8 +825,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/open-policy-agent/opa v0.67.0 h1:FOdsO9yNhfmrh+72oVK7ImWmzruG+VSpfbr5IBqEWVs= -github.com/open-policy-agent/opa v0.67.0/go.mod h1:aqKlHc8E2VAAylYE9x09zJYr/fYzGX+JKne89UGqFzk= +github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= +github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -815,8 +845,8 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= @@ -828,6 +858,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -870,6 +902,8 @@ github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62 
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= @@ -900,26 +934,26 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v2 v2.4.0 h1:2NdidNgClg+oXr/fDIr37E/BE6j00gqgUhSiBK2kjSQ= -github.com/sigstore/cosign/v2 v2.4.0/go.mod h1:j+fH1DCUkcn92qp6ezDj4JbGMri6eG1nLJC+hs64rvc= -github.com/sigstore/fulcio v1.5.1 h1:Iasy1zfNjaq8BV4S8o6pXspLDU28PQC2z07GmOu9zpM= -github.com/sigstore/fulcio v1.5.1/go.mod h1:W1A/UHrTopy1IBZPMtHmxg7GPYAu+vt5dRXM3W6yjPo= +github.com/sigstore/cosign/v2 v2.4.1 h1:b8UXEfJFks3hmTwyxrRNrn6racpmccUycBHxDMkEPvU= +github.com/sigstore/cosign/v2 v2.4.1/go.mod h1:GvzjBeUKigI+XYnsoVQDmMAsMMc6engxztRSuxE+x9I= +github.com/sigstore/fulcio v1.6.3 h1:Mvm/bP6ELHgazqZehL8TANS1maAkRoM23CRAdkM4xQI= +github.com/sigstore/fulcio v1.6.3/go.mod h1:5SDgLn7BOUVLKe1DwOEX3wkWFu5qEmhUlWm+SFf0GH8= github.com/sigstore/protobuf-specs v0.3.2 h1:nCVARCN+fHjlNCk3ThNXwrZRqIommIeNKWwQvORuRQo= github.com/sigstore/protobuf-specs v0.3.2/go.mod h1:RZ0uOdJR4OB3tLQeAyWoJFbNCBFrPQdcokntde4zRBA= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.8 h1:B6ZQPBKK7Z7tO3bjLNnlCMG+H66tO4E/+qAphX8T/hg= -github.com/sigstore/sigstore v1.8.8/go.mod h1:GW0GgJSCTBJY3fUOuGDHeFWcD++c4G8Y9K015pwcpDI= -github.com/sigstore/sigstore-go v0.5.1 h1:5IhKvtjlQBeLnjKkzMELNG4tIBf+xXQkDzhLV77+/8Y= -github.com/sigstore/sigstore-go v0.5.1/go.mod h1:TuOfV7THHqiDaUHuJ5+QN23RP/YoKmsbwJpY+aaYPN0= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 h1:2zHmUvaYCwV6LVeTo+OAkTm8ykOGzA9uFlAjwDPAUWM= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 h1:RKk4Z+qMaLORUdT7zntwMqKiYAej1VQlCswg0S7xNSY= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 h1:89Xtxj8oqZt3UlSpCP4wApFvnQ2Z/dgowW5QOVhQigI= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 h1:Zte3Oogkd8m+nu2oK3yHtGmN++TZWh2Lm6q2iSprT1M= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4= +github.com/sigstore/sigstore v1.8.10 
h1:r4t+TYzJlG9JdFxMy+um9GZhZ2N1hBTyTex0AHEZxFs= +github.com/sigstore/sigstore v1.8.10/go.mod h1:BekjqxS5ZtHNJC4u3Q3Stvfx2eyisbW/lUZzmPU2u4A= +github.com/sigstore/sigstore-go v0.6.1 h1:tGkkv1oDIER+QYU5MrjqlttQOVDWfSkmYwMqkJhB/cg= +github.com/sigstore/sigstore-go v0.6.1/go.mod h1:Xe5GHmUeACRFbomUWzVkf/xYCn8xVifb9DgqJrV2dIw= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 h1:e5GfVngPjGap/N3ODefayt7vKIPS1/v3hWLZ9+4MrN4= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10/go.mod h1:HOr3AdFPKdND2FNl/sUD5ZifPl1OMJvrbf9xIaaWcus= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 h1:9tZEpfIL/ewAG9G87AHe3aVoy8Ujos2F1qLfCckX6jQ= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10/go.mod h1:VnIAcitund62R45ezK/dtUeEhuRtB3LsAgJ8m0H34zc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 h1:Xre51HdjIIaVo5ox5zyL+6h0tkrx7Ke9Neh7fLmmZK0= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10/go.mod h1:VNfdklQDbyGJog8S7apdxiEfmYmCkKyxrsCL9xprkTY= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 h1:HjfjL3x3dP2kaGqQHVog974cTcKfzFaGjfZyLQ9KXrg= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10/go.mod h1:jaeEjkTW1p3gUyPjz9lTcT4TydCs208FoyAwIs6bIT4= github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -946,8 +980,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8= -github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY= +github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c= +github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -978,24 +1012,24 @@ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDd github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tektoncd/chains v0.22.0 h1:9rgm+skfKpmIAh0CpHSPT6i2R2kmH815YF5iVnvNEMM= -github.com/tektoncd/chains v0.22.0/go.mod h1:5FsO4gIKUIlJ4ohmmMXep0GPMWN1oEwRLXiETmU7XhY= -github.com/tektoncd/cli v0.38.1 h1:dp3k1Ad6ao5HcDR6LHdIA3KgOgztu4deJpil3iDl76U= -github.com/tektoncd/cli v0.38.1/go.mod h1:5RELvDGAhFxntKnCVZbEOJ+a1bEIVTPNivg1gcdfxKo= -github.com/tektoncd/hub v1.18.0 h1:q9EN0gBKYyHsa9SnyT0LbFP47IG8nH7g1oqzKlObAww= -github.com/tektoncd/hub v1.18.0/go.mod h1:pPiMmCE7ORjL+S+ZYkqRGgaaUpRIkdzSFvv/numhO54= -github.com/tektoncd/pipeline v0.65.0 h1:MIXXt/OeV/SLQ0KYXXBzPHBwXe2iIhgZcJBHkkgzaYY= -github.com/tektoncd/pipeline v0.65.0/go.mod 
h1:V3cyfxxc7b3GLT2a13GX2mWA86qmxWhh4mOp4gfFQwQ= +github.com/tektoncd/chains v0.23.0 h1:zRhzSUlAPxL/dUH0AXWcW8AtJbF2FrqM2yTyR3J8A5U= +github.com/tektoncd/chains v0.23.0/go.mod h1:Bhyomb/vr0cnI4+VJmjeukRCyti4XGWb9GPWSttAFIs= +github.com/tektoncd/cli v0.39.0 h1:sGsDnEa0nsX11L2hKKOZuNOaEQqwDS2mQHlqMM4m34Q= +github.com/tektoncd/cli v0.39.0/go.mod h1:yUfVpErsZDTtQw3uQvCxukOhgOC0SZfrXEhOu9/kmkc= +github.com/tektoncd/hub v1.19.0 h1:UXl/jPB9uoa4ULyh64SN1VuKMMIZvXq6bkRGI8ATU+g= +github.com/tektoncd/hub v1.19.0/go.mod h1:JJvxZ37nPygZOaUgDIQddihWnD+hSKExPs261FXmttc= +github.com/tektoncd/pipeline v0.65.2 h1:N63Xb9uiunewPVDTz4nGamJOtVg+Q38Cy4LRpvr+2e4= +github.com/tektoncd/pipeline v0.65.2/go.mod h1:V3cyfxxc7b3GLT2a13GX2mWA86qmxWhh4mOp4gfFQwQ= github.com/tektoncd/results v0.13.0 h1:+YZwUt7QGx15Ihu/oh9tmlKVtTv68GI6rCsiLTwnU/g= github.com/tektoncd/results v0.13.0/go.mod h1:xrq3DRmBKqrrhT1nHVCx4w1N+T/B0guCd+qzdZhY8w8= -github.com/tektoncd/triggers v0.29.1 h1:UXqjJICaRsWYb0qkIYOUlqaDR5te9Zmfrz93+TXy3ug= -github.com/tektoncd/triggers v0.29.1/go.mod h1:yVNxCSlYw//uKoXDi4kzzwYGkK2KIYLt6FwwSTz0aj8= +github.com/tektoncd/triggers v0.30.0 h1:1RV3yxRlEN565qHYG8vIKyfrU3QVZkPuv67qurLeSYg= +github.com/tektoncd/triggers v0.30.0/go.mod h1:YkhGaFuL+z4aErBHz66di1dwuDjowmryTq6OAfQvpus= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.0.0 h1:rD8d9RotYBprZVgC+9oyTZ5MmawepnTSTqoDuxjWgbs= -github.com/theupdateframework/go-tuf/v2 v2.0.0/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= +github.com/theupdateframework/go-tuf/v2 v2.0.1 h1:11p9tXpq10KQEujxjcIjDSivMKCMLguls7erXHZnxJQ= +github.com/theupdateframework/go-tuf/v2 v2.0.1/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -1015,8 +1049,8 @@ github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinC github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xanzy/go-gitlab v0.108.0 h1:IEvEUWFR5G1seslRhJ8gC//INiIUqYXuSUoBd7/gFKE= -github.com/xanzy/go-gitlab v0.108.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= +github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= +github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -1033,8 +1067,8 @@ github.com/xlzd/gotp v0.1.0 h1:37blvlKCh38s+fkem+fFh7sMnceltoIEBYTVXyoa5Po= github.com/xlzd/gotp v0.1.0/go.mod h1:ndLJ3JKzi3xLmUProq4LLxCuECL93dG9WASNLpHz8qg= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= 
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 h1:tBiBTKHnIjovYoLX/TPkcf+OjqqKGQrPtGT3Foz+Pgo= -github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76/go.mod h1:SQliXeA7Dhkt//vS29v3zpbEwoa+zb2Cn5xj5uO4K5U= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= @@ -1055,8 +1089,8 @@ github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97 github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= -go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8= +go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1065,22 +1099,26 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.step.sm/crypto v0.51.1 h1:ktUg/2hetEMiBAqgz502ktZDGoDoGrcHFg3XpkmkvvA= -go.step.sm/crypto v0.51.1/go.mod h1:PdrhttNU/tG9/YsVd4fdlysBN+UV503p0o2irFZQlAw= +go.step.sm/crypto v0.51.2 h1:5EiCGIMg7IvQTGmJrwRosbXeprtT80OhoS/PJarg60o= +go.step.sm/crypto v0.51.2/go.mod h1:QK7czLjN2k+uqVp5CHXxJbhc70kVRSP+0CQF3zsR5M0= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -1090,14 +1128,14 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -goa.design/goa/v3 v3.18.2 h1:2t3RHgc2tTYEExEwRlnkrSsFD82rsZ5mBkv5JUBvelY= -goa.design/goa/v3 v3.18.2/go.mod h1:jS66/I2PCh9MPwXtFY8vWb4pGQDYz3qVK3TjjvRLEUM= +goa.design/goa/v3 v3.19.1 h1:jpV3LEy7YANzPMwm++Lu17RoThRJgXrPxdEM0A1nlOE= +goa.design/goa/v3 v3.19.1/go.mod h1:astNE9ube0YCxqq7DQkt1MtLxB/b3kRPEFkEZovcO2I= gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng= gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= -gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 h1:71QcVqNRzyPI+f3v38XK+TjrLusNUFIYA7Wro48aX2I= -gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18/go.mod h1:IN/uj8zLRQ0sZ5UffdERqoB5MT7DsmyMQtZDht9cknQ= -gocloud.dev/pubsub/kafkapubsub v0.37.0 h1:rH122Q2COVNhAGRyid/FHY164LAZhss9V5DiIKQ5Gos= -gocloud.dev/pubsub/kafkapubsub v0.37.0/go.mod h1:y0a+Rv5JvNuVyv1qGic2m1bDoy0ZArWLqcxm/1WxuB8= +gocloud.dev/docstore/mongodocstore v0.40.0 h1:KTwP9Wr3PNPxN3bItxtK/RHiF4D6cTYjE5GDkPKcJrI= +gocloud.dev/docstore/mongodocstore v0.40.0/go.mod 
h1:wjrLAUAoAKA1jbteT4nP36Gy5egPAHvwBfAJFUH9YKI= +gocloud.dev/pubsub/kafkapubsub v0.40.0 h1:6gfRtOUd/ztKAQLYPUl7xFIVre9okcvKUp0CMaMSY18= +gocloud.dev/pubsub/kafkapubsub v0.40.0/go.mod h1:/03R9nFjSFfjM/dPVnQdtbAQL4InkE7832zhvE1x034= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1115,11 +1153,12 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1130,8 +1169,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= -golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1156,8 +1195,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1208,11 +1247,12 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1221,8 +1261,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1238,6 +1278,7 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1305,11 +1346,12 @@ 
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1320,11 +1362,12 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1340,13 +1383,13 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1396,8 +1439,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1424,8 +1467,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= +google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1463,12 +1506,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1482,8 +1525,10 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1498,8 +1543,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1543,14 +1588,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.29.8 h1:kVErAPf1v7MOwNO6rBYnf2i4kQ2668Y9pHGO5C1/wSo= -k8s.io/cli-runtime v0.29.8/go.mod h1:c00Fk85K05DtEknMAi1t7ao1MR4nmQ9YlvC+QluvNoY= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.29.11 h1:NSVtRjAGU2IKcoEhOclOCk3p+biRQDWlBm8T4pGyRXY= +k8s.io/cli-runtime v0.29.11/go.mod h1:pEQ3MTwNg3FOD2K4KWK913gCP24TnPc/7pv7+mzkaJo= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 000000000..579c9d21e --- /dev/null +++ b/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +6.4.0 +# Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 000000000..3de1ec213 --- /dev/null +++ b/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore new file mode 100644 index 000000000..ac51a054d --- /dev/null +++ b/vendor/cel.dev/expr/.gitignore @@ -0,0 +1 @@ +bazel-* diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 000000000..0bbe9ed77 --- /dev/null +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +go_library( + name = "expr", + srcs = [ + "checked.pb.go", + "eval.pb.go", + "explain.pb.go", + "syntax.pb.go", + "value.pb.go", + ], + importpath = "cel.dev/expr", + visibility = ["//visibility:public"], + deps = [ + "//proto/cel/expr:google_rpc_status_go_proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +alias( + name = 
"go_default_library", + actual = ":expr", + visibility = ["//visibility:public"], +) diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..59908e2d8 --- /dev/null +++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone who contributes by +posting issues, updating documentation, submitting pull requests, +providing feedback in comments, and any other activities. + +Communication through any of CEL's channels (GitHub, Gitter, IRC, +mailing lists, Google+, Twitter, etc.) must be constructive and never +resort to personal attacks, trolling, public or private harassment, +insults, or other unprofessional conduct. + +We promise to extend courtesy and respect to everyone involved in this +project regardless of gender, gender identity, sexual orientation, +disability, age, race, ethnicity, religion, or level of experience. We +expect anyone contributing to the project to do the same. + +If any member of the community violates this code of conduct, the +maintainers of the CEL project may take action, removing issues, +comments, and PRs or blocking accounts as deemed appropriate. + +If you are subject to or witness unacceptable behavior, or have any +other concerns, please email us at +[cel-conduct@google.com](mailto:cel-conduct@google.com). diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md new file mode 100644 index 000000000..8f5fd5c31 --- /dev/null +++ b/vendor/cel.dev/expr/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are a +few guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## What to expect from maintainers + +Expect maintainers to respond to new issues or pull requests within a week. +For outstanding and ongoing issues and particularly for long-running +pull requests, expect the maintainers to review within a week of a +contributor asking for a new review. There is no commitment to resolution -- +merging or closing a pull request, or fixing or closing an issue -- because some +issues will require more discussion than others. diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md new file mode 100644 index 000000000..0a525bc17 --- /dev/null +++ b/vendor/cel.dev/expr/GOVERNANCE.md @@ -0,0 +1,43 @@ +# Project Governance + +This document defines the governance process for the CEL language. CEL is +Google-developed, but openly governed. 
Major contributors to the CEL +specification and its corresponding implementations constitute the CEL +Language Council. New members may be added by a unanimous vote of the +Council. + +The MAINTAINERS.md file lists the members of the CEL Language Council, and +unofficially indicates the "areas of expertise" of each member with respect +to the publicly available CEL repos. + +## Code Changes + +Code changes must follow the standard pull request (PR) model documented in the +CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a +maintainer. The maintainer reserves the right to request that any feature +request (FR) or PR be reviewed by the language council. + +## Syntax and Semantic Changes + +Syntactic and semantic changes must be reviewed by the CEL Language Council. +Maintainers may also request language council review at their discretion. + +The review process is as follows: + +- Create a Feature Request in the CEL-Spec repo. The feature description will + serve as an abstract for the detailed design document. +- Co-develop a design document with the Language Council. +- Once the proposer gives the design document approval, the document will be + linked to the FR in the CEL-Spec repo and opened for comments to members of + the cel-lang-discuss@googlegroups.com. +- The Language Council will review the design doc at the next council meeting + (once every three weeks) and the council decision included in the document. + +If the proposal is approved, the spec will be updated by a maintainer (if +applicable) and a rationale will be included in the CEL-Spec wiki to ensure +future developers may follow CEL's growth and direction over time. + +Approved proposals may be implemented by the proposer or by the maintainers as +the parties see fit. At the discretion of the maintainer, changes from the +approved design are permitted during implementation if they improve the user +experience and clarity of the feature. diff --git a/vendor/cel.dev/expr/LICENSE b/vendor/cel.dev/expr/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cel.dev/expr/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md new file mode 100644 index 000000000..1ed2eb8ab --- /dev/null +++ b/vendor/cel.dev/expr/MAINTAINERS.md @@ -0,0 +1,13 @@ +# CEL Language Council + +| Name | Company | Area of Expertise | +|-----------------|--------------|-------------------| +| Alfred Fuller | Facebook | cel-cpp, cel-spec | +| Jim Larson | Google | cel-go, cel-spec | +| Matthais Blume | Google | cel-spec | +| Tristan Swadell | Google | cel-go, cel-spec | + +## Emeritus + +* Sanjay Ghemawat (Google) +* Wolfgang Grieskamp (Facebook) diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md new file mode 100644 index 000000000..2da1e7f2f --- /dev/null +++ b/vendor/cel.dev/expr/README.md @@ -0,0 +1,65 @@ +# Common Expression Language + +The Common Expression Language (CEL) implements common semantics for expression +evaluation, enabling different applications to more easily interoperate. 
+ +Key Applications + +* Security policy: organizations have complex infrastructure and need common + tooling to reason about the system as a whole +* Protocols: expressions are a useful data type and require interoperability + across programming languages and platforms. + + +Guiding philosophy: + +1. Keep it small & fast. + * CEL evaluates in linear time, is mutation free, and not Turing-complete. + This limitation is a feature of the language design, which allows the + implementation to evaluate orders of magnitude faster than equivalently + sandboxed JavaScript. +2. Make it extensible. + * CEL is designed to be embedded in applications, and allows for + extensibility via its context which allows for functions and data to be + provided by the software that embeds it. +3. Developer-friendly. + * The language is approachable to developers. The initial spec was based + on the experience of developing Firebase Rules and usability testing + many prior iterations. + * The library itself and accompanying toolings should be easy to adopt by + teams that seek to integrate CEL into their platforms. + +The required components of a system that supports CEL are: + +* The textual representation of an expression as written by a developer. It is + of similar syntax to expressions in C/C++/Java/JavaScript +* A binary representation of an expression. It is an abstract syntax tree + (AST). +* A compiler library that converts the textual representation to the binary + representation. This can be done ahead of time (in the control plane) or + just before evaluation (in the data plane). +* A context containing one or more typed variables, often protobuf messages. + Most use-cases will use `attribute_context.proto` +* An evaluator library that takes the binary format in the context and + produces a result, usually a Boolean. + +Example of boolean conditions and object construction: + +``` c +// Condition +account.balance >= transaction.withdrawal + || (account.overdraftProtection + && account.overdraftLimit >= transaction.withdrawal - account.balance) + +// Object construction +common.GeoPoint{ latitude: 10.0, longitude: -5.5 } +``` + +For more detail, see: + +* [Introduction](doc/intro.md) +* [Language Definition](doc/langdef.md) + +Released under the [Apache License](LICENSE). + +Disclaimer: This is not an official Google product. 
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 000000000..bb4c469ad --- /dev/null +++ b/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 05/26/2023 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99", + sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571", + urls = [ + "https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +switched_rules_by_language( + name = "com_google_googleapis_imports", + cc = 
True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=", + version = "v0.0.0-20230525234035-dd9d682886f9", +) + +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=", + version = "v0.0.0-20230525234030-28d5490b6b19", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 000000000..bb225c8ab --- /dev/null +++ b/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_checked_proto_goTypes, + DependencyIndexes: file_cel_expr_checked_proto_depIdxs, + EnumInfos: file_cel_expr_checked_proto_enumTypes, + MessageInfos: file_cel_expr_checked_proto_msgTypes, + }.Build() + File_cel_expr_checked_proto = out.File + file_cel_expr_checked_proto_rawDesc = nil + file_cel_expr_checked_proto_goTypes = nil + file_cel_expr_checked_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml new file mode 100644 index 000000000..8a8ea3763 --- /dev/null +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -0,0 +1,9 @@ +steps: +- name: 'gcr.io/cloud-builders/bazel:6.4.0' + entrypoint: bazel + args: ['test', '--test_output=errors', '...'] + id: bazel-test + waitFor: ['-'] +timeout: 15m +options: + machineType: 'N1_HIGHCPU_32' diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go new file mode 100644 index 000000000..8f651f9cc --- /dev/null +++ b/vendor/cel.dev/expr/eval.pb.go @@ -0,0 +1,490 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/eval.proto + +package expr + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EvalState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *EvalState) Reset() { + *x = EvalState{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState) ProtoMessage() {} + +func (x *EvalState) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. 
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_eval_proto_goTypes, + DependencyIndexes: file_cel_expr_eval_proto_depIdxs, + MessageInfos: file_cel_expr_eval_proto_msgTypes, + }.Build() + File_cel_expr_eval_proto = out.File + file_cel_expr_eval_proto_rawDesc = nil + file_cel_expr_eval_proto_goTypes = nil + file_cel_expr_eval_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go new file mode 100644 index 000000000..79fd5443b --- /dev/null +++ b/vendor/cel.dev/expr/explain.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/explain.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type Explain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` +} + +func (x *Explain) Reset() { + *x = Explain{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain) ProtoMessage() {} + +func (x *Explain) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_explain_proto_rawDescOnce sync.Once + file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc +) + +func file_cel_expr_explain_proto_rawDescGZIP() []byte { + file_cel_expr_explain_proto_rawDescOnce.Do(func() { + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + }) + return file_cel_expr_explain_proto_rawDescData +} + +var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cel_expr_explain_proto_goTypes = []interface{}{ + (*Explain)(nil), // 0: cel.expr.Explain + (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep + (*Value)(nil), // 2: cel.expr.Value +} +var file_cel_expr_explain_proto_depIdxs = []int32{ + 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value + 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_cel_expr_explain_proto_init() } +func file_cel_expr_explain_proto_init() { + if File_cel_expr_explain_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain_ExprStep); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_explain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_explain_proto_goTypes, + DependencyIndexes: file_cel_expr_explain_proto_depIdxs, + MessageInfos: file_cel_expr_explain_proto_msgTypes, + }.Build() + File_cel_expr_explain_proto = out.File + file_cel_expr_explain_proto_rawDesc = nil + file_cel_expr_explain_proto_goTypes = nil + file_cel_expr_explain_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh new file mode 100644 index 000000000..abf2f9788 --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto.sh @@ -0,0 +1,9 @@ +#!/bin/sh +bazel build //proto/test/... 
+files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) +for src in ${files[@]}; +do + dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/') + echo "copying $dst" + $(cp $src $dst) +done diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh new file mode 100644 index 000000000..9a13479e4 --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +bazel build //proto/cel/expr:all + +rm -vf ./*.pb.go + +files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") ) +for src in "${files[@]}"; +do + cp -v "${src}" ./ +done diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go new file mode 100644 index 000000000..48a952872 --- /dev/null +++ b/vendor/cel.dev/expr/syntax.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/syntax.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SourceInfo_Extension_Component int32 + +const ( + SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0 + SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1 + SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2 + SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3 +) + +// Enum value maps for SourceInfo_Extension_Component. +var ( + SourceInfo_Extension_Component_name = map[int32]string{ + 0: "COMPONENT_UNSPECIFIED", + 1: "COMPONENT_PARSER", + 2: "COMPONENT_TYPE_CHECKER", + 3: "COMPONENT_RUNTIME", + } + SourceInfo_Extension_Component_value = map[string]int32{ + "COMPONENT_UNSPECIFIED": 0, + "COMPONENT_PARSER": 1, + "COMPONENT_TYPE_CHECKER": 2, + "COMPONENT_RUNTIME": 3, + } +) + +func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component { + p := new(SourceInfo_Extension_Component) + *p = x + return p +} + +func (x SourceInfo_Extension_Component) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_syntax_proto_enumTypes[0].Descriptor() +} + +func (SourceInfo_Extension_Component) Type() protoreflect.EnumType { + return &file_cel_expr_syntax_proto_enumTypes[0] +} + +func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead. 
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. +func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
+func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() 
protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
+func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 000000000..e5e29228c --- /dev/null +++ b/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json index 
841b54357..39ed1f947 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,18 +1,18 @@ { "ai": "0.8.2", "aiplatform": "1.68.0", - "auth": "0.8.1", + "auth": "0.9.7", "auth/oauth2adapt": "0.2.4", - "bigquery": "1.62.0", - "bigtable": "1.29.0", - "datastore": "1.17.1", + "bigquery": "1.63.1", + "bigtable": "1.33.0", + "datastore": "1.19.0", "errorreporting": "0.3.1", - "firestore": "1.16.0", + "firestore": "1.17.0", "logging": "1.11.0", "profiler": "0.4.1", - "pubsub": "1.41.0", + "pubsub": "1.44.0", "pubsublite": "1.8.2", - "spanner": "1.66.0", - "storage": "1.43.0", - "vertexai": "0.12.0" + "spanner": "1.69.0", + "storage": "1.44.0", + "vertexai": "0.13.1" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 8ac2f38f9..edbdcf47f 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,149 +1,150 @@ { - "accessapproval": "1.7.12", - "accesscontextmanager": "1.8.12", - "advisorynotifications": "1.4.6", - "alloydb": "1.10.7", - "analytics": "0.24.0", - "apigateway": "1.6.12", - "apigeeconnect": "1.6.12", - "apigeeregistry": "0.8.10", - "apikeys": "1.1.12", - "appengine": "1.8.12", - "apphub": "0.1.6", - "apps": "0.4.7", - "area120": "0.8.12", - "artifactregistry": "1.14.14", - "asset": "1.19.6", - "assuredworkloads": "1.11.12", - "automl": "1.13.12", - "backupdr": "1.0.4", - "baremetalsolution": "1.2.11", - "batch": "1.9.3", - "beyondcorp": "1.0.11", - "billing": "1.18.10", - "binaryauthorization": "1.8.8", - "certificatemanager": "1.8.6", - "channel": "1.17.12", - "chat": "0.3.1", - "cloudbuild": "1.16.6", - "cloudcontrolspartner": "1.0.4", - "clouddms": "1.7.11", - "cloudprofiler": "0.3.6", - "cloudquotas": "1.0.4", - "cloudtasks": "1.12.13", - "commerce": "1.0.5", - "compute": "1.27.5", - "compute/metadata": "0.5.0", - "confidentialcomputing": "1.6.1", - "config": "1.0.5", - "contactcenterinsights": "1.13.7", - "container": "1.38.1", - "containeranalysis": "0.12.2", - "datacatalog": "1.21.1", - "dataflow": "0.9.12", - "dataform": "0.9.9", - "datafusion": "1.7.12", - "datalabeling": "0.8.12", - "dataplex": "1.18.3", - "dataproc": "2.5.4", - "dataqna": "0.8.12", - "datastream": "1.10.11", - "deploy": "1.21.1", - "developerconnect": "0.1.4", - "dialogflow": "1.56.0", - "discoveryengine": "1.12.0", - "dlp": "1.17.0", - "documentai": "1.32.0", - "domains": "0.9.12", - "edgecontainer": "1.2.6", - "edgenetwork": "1.1.3", - "essentialcontacts": "1.6.13", - "eventarc": "1.13.11", - "filestore": "1.8.8", - "functions": "1.17.0", - "gkebackup": "1.5.5", - "gkeconnect": "0.8.12", - "gkehub": "0.14.12", - "gkemulticloud": "1.2.5", - "grafeas": "0.3.10", - "gsuiteaddons": "1.6.12", - "iam": "1.1.13", - "iap": "1.9.11", - "identitytoolkit": "0.1.4", - "ids": "1.4.12", - "iot": "1.7.12", - "kms": "1.18.5", - "language": "1.13.1", - "lifesciences": "0.9.12", - "longrunning": "0.5.12", - "managedidentities": "1.6.12", - "managedkafka": "0.1.6", - "maps": "1.11.7", - "mediatranslation": "0.8.12", - "memcache": "1.10.12", - "metastore": "1.13.11", - "migrationcenter": "1.0.5", - "monitoring": "1.20.4", - "netapp": "1.2.1", - "networkconnectivity": "1.14.11", - "networkmanagement": "1.13.7", - "networksecurity": "0.9.12", - "networkservices": "0.1.6", - "notebooks": "1.11.10", - "optimization": 
"1.6.10", - "orchestration": "1.9.7", - "orgpolicy": "1.12.8", - "osconfig": "1.13.3", - "oslogin": "1.13.8", - "parallelstore": "0.5.1", - "phishingprotection": "0.8.12", - "policysimulator": "0.2.10", - "policytroubleshooter": "1.10.10", - "privatecatalog": "0.9.12", - "privilegedaccessmanager": "0.1.1", - "rapidmigrationassessment": "1.0.12", - "recaptchaenterprise": "2.14.3", - "recommendationengine": "0.8.12", - "recommender": "1.12.8", - "redis": "1.16.5", - "resourcemanager": "1.9.12", - "resourcesettings": "1.7.5", - "retail": "1.17.5", - "run": "1.4.1", - "scheduler": "1.10.13", - "secretmanager": "1.13.6", - "securesourcemanager": "1.1.1", - "security": "1.17.5", - "securitycenter": "1.34.0", - "securitycentermanagement": "1.0.4", - "securityposture": "0.1.8", - "servicecontrol": "1.13.7", - "servicedirectory": "1.11.12", - "servicehealth": "1.0.5", - "servicemanagement": "1.9.13", - "serviceusage": "1.8.11", - "shell": "1.7.12", - "shopping": "0.8.7", - "speech": "1.24.1", - "storageinsights": "1.0.12", - "storagetransfer": "1.10.11", - "streetview": "0.1.5", - "support": "1.0.11", - "talent": "1.6.13", - "telcoautomation": "1.0.4", - "texttospeech": "1.7.12", - "tpu": "1.6.12", - "trace": "1.10.12", - "translate": "1.11.0", - "video": "1.22.1", - "videointelligence": "1.11.12", - "vision": "2.8.7", - "visionai": "0.2.5", - "vmmigration": "1.7.12", - "vmwareengine": "1.2.1", - "vpcaccess": "1.7.12", - "webrisk": "1.9.12", - "websecurityscanner": "1.6.12", - "workflows": "1.12.11", - "workstations": "1.0.5" + "accessapproval": "1.8.1", + "accesscontextmanager": "1.9.1", + "advisorynotifications": "1.5.1", + "alloydb": "1.12.1", + "analytics": "0.25.1", + "apigateway": "1.7.1", + "apigeeconnect": "1.7.1", + "apigeeregistry": "0.9.1", + "apihub": "0.1.1", + "apikeys": "1.2.1", + "appengine": "1.9.1", + "apphub": "0.2.1", + "apps": "0.5.1", + "area120": "0.9.1", + "artifactregistry": "1.15.1", + "asset": "1.20.2", + "assuredworkloads": "1.12.1", + "automl": "1.14.1", + "backupdr": "1.1.1", + "baremetalsolution": "1.3.1", + "batch": "1.11.0", + "beyondcorp": "1.1.1", + "billing": "1.19.1", + "binaryauthorization": "1.9.1", + "certificatemanager": "1.9.1", + "channel": "1.18.1", + "chat": "0.6.0", + "cloudbuild": "1.18.0", + "cloudcontrolspartner": "1.2.0", + "clouddms": "1.8.1", + "cloudprofiler": "0.4.1", + "cloudquotas": "1.1.1", + "cloudtasks": "1.13.1", + "commerce": "1.1.1", + "compute": "1.28.1", + "compute/metadata": "0.5.2", + "confidentialcomputing": "1.7.1", + "config": "1.1.1", + "contactcenterinsights": "1.14.1", + "container": "1.40.0", + "containeranalysis": "0.13.1", + "datacatalog": "1.22.1", + "dataflow": "0.10.1", + "dataform": "0.10.1", + "datafusion": "1.8.1", + "datalabeling": "0.9.1", + "dataplex": "1.19.1", + "dataproc": "2.9.0", + "dataqna": "0.9.1", + "datastream": "1.11.1", + "deploy": "1.22.1", + "developerconnect": "0.2.1", + "dialogflow": "1.58.0", + "discoveryengine": "1.14.0", + "dlp": "1.19.0", + "documentai": "1.34.0", + "domains": "0.10.1", + "edgecontainer": "1.3.1", + "edgenetwork": "1.2.1", + "essentialcontacts": "1.7.1", + "eventarc": "1.14.1", + "filestore": "1.9.1", + "functions": "1.19.1", + "gkebackup": "1.6.1", + "gkeconnect": "0.11.1", + "gkehub": "0.15.1", + "gkemulticloud": "1.4.0", + "grafeas": "0.3.11", + "gsuiteaddons": "1.7.1", + "iam": "1.2.1", + "iap": "1.10.1", + "identitytoolkit": "0.2.1", + "ids": "1.5.1", + "iot": "1.8.1", + "kms": "1.20.0", + "language": "1.14.1", + "lifesciences": "0.10.1", + "longrunning": "0.6.1", + 
"managedidentities": "1.7.1", + "managedkafka": "0.2.1", + "maps": "1.14.0", + "mediatranslation": "0.9.1", + "memcache": "1.11.1", + "metastore": "1.14.1", + "migrationcenter": "1.1.1", + "monitoring": "1.21.1", + "netapp": "1.4.0", + "networkconnectivity": "1.15.1", + "networkmanagement": "1.14.1", + "networksecurity": "0.10.1", + "networkservices": "0.2.1", + "notebooks": "1.12.1", + "optimization": "1.7.1", + "orchestration": "1.11.0", + "orgpolicy": "1.14.0", + "osconfig": "1.14.1", + "oslogin": "1.14.1", + "parallelstore": "0.6.1", + "phishingprotection": "0.9.1", + "policysimulator": "0.3.1", + "policytroubleshooter": "1.11.1", + "privatecatalog": "0.10.1", + "privilegedaccessmanager": "0.2.1", + "rapidmigrationassessment": "1.1.1", + "recaptchaenterprise": "2.17.1", + "recommendationengine": "0.9.1", + "recommender": "1.13.1", + "redis": "1.17.1", + "resourcemanager": "1.10.1", + "resourcesettings": "1.8.1", + "retail": "1.18.1", + "run": "1.5.1", + "scheduler": "1.11.1", + "secretmanager": "1.14.1", + "securesourcemanager": "1.2.1", + "security": "1.18.1", + "securitycenter": "1.35.1", + "securitycentermanagement": "1.1.1", + "securityposture": "0.2.1", + "servicecontrol": "1.14.1", + "servicedirectory": "1.12.1", + "servicehealth": "1.1.1", + "servicemanagement": "1.10.1", + "serviceusage": "1.9.1", + "shell": "1.8.1", + "shopping": "0.10.0", + "speech": "1.25.1", + "storageinsights": "1.1.1", + "storagetransfer": "1.11.1", + "streetview": "0.2.1", + "support": "1.1.1", + "talent": "1.7.1", + "telcoautomation": "1.1.1", + "texttospeech": "1.8.1", + "tpu": "1.7.1", + "trace": "1.11.1", + "translate": "1.12.1", + "video": "1.23.1", + "videointelligence": "1.12.1", + "vision": "2.9.1", + "visionai": "0.4.1", + "vmmigration": "1.8.1", + "vmwareengine": "1.3.1", + "vpcaccess": "1.8.1", + "webrisk": "1.10.1", + "websecurityscanner": "1.7.1", + "workflows": "1.13.1", + "workstations": "1.1.1" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 7b1015e63..c8f1da56d 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.115.1" + ".": "0.116.0" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index d48e0cfba..adc725ca1 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09) + + +### Features + +* **genai:** Add tokenizer package ([#10699](https://github.com/googleapis/google-cloud-go/issues/10699)) ([214af16](https://github.com/googleapis/google-cloud-go/commit/214af1604bf3837f68e96dbf81c1331b90c9375f)) + ## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 995149790..63db0209c 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -28,12 +28,16 @@ For an updated list of all of our released APIs please see our ## [Go Versions Supported](#supported-versions) +**Note:** As of Jan 1, 2025 the Cloud Client Libraries for Go will support the +two most-recent major Go releases -- the same [policy](https://go.dev/doc/devel/release#policy) +the Go programming language follows. 
+ Our libraries are compatible with at least the three most recent, major Go releases. They are currently compatible with: +- Go 1.23 - Go 1.22 - Go 1.21 -- Go 1.20 ## Authorization @@ -56,14 +60,14 @@ client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfil ``` You can exert more control over authorization by using the -[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass -[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) +[credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) package to +create an [auth.Credentials](https://pkg.go.dev/cloud.google.com/go/auth#Credentials). +Then pass [`option.WithAuthCredentials`](https://pkg.go.dev/google.golang.org/api/option#WithAuthCredentials) to the `NewClient` function: ```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +creds := ... +client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds)) ``` ## Contributing diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index ea6df0caf..ecdc47dab 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,78 @@ # Changelog +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30) + + +### Features + +* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b)) + +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs 
[#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + ## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a0..6fe4f0763 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL 
and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 2eb78d7b0..a7fa84f6f 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. package auth import ( @@ -130,7 +135,9 @@ func (t *Token) isEmpty() bool { } // Credentials holds Google credentials, including -// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials type Credentials struct { json []byte projectID CredentialsPropertyProvider @@ -220,9 +227,7 @@ type CredentialsOptions struct { UniverseDomainProvider CredentialsPropertyProvider } -// NewCredentials returns new [Credentials] from the provided options. Most users -// will want to build this object a function from the -// [cloud.google.com/go/auth/credentials] package. +// NewCredentials returns new [Credentials] from the provided options. func NewCredentials(opts *CredentialsOptions) *Credentials { creds := &Credentials{ TokenProvider: opts.TokenProvider, @@ -235,8 +240,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { return creds } -// CachedTokenProviderOptions provided options for configuring a -// CachedTokenProvider. +// CachedTokenProviderOptions provides options for configuring a cached +// [TokenProvider]. 
type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, // even if it is expired. The default is false. Optional. @@ -246,7 +251,7 @@ type CachedTokenProviderOptions struct { // seconds. Optional. ExpireEarly time.Duration // DisableAsyncRefresh configures a synchronous workflow that refreshes - // stale tokens while blocking. The default is false. Optional. + // tokens in a blocking manner. The default is false. Optional. DisableAsyncRefresh bool } @@ -273,12 +278,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned // by the underlying provider. By default it will refresh tokens asynchronously -// (non-blocking mode) within a window that starts 3 minutes and 45 seconds -// before they expire. The asynchronous (non-blocking) refresh can be changed to -// a synchronous (blocking) refresh using the -// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry -// duration can be configured using the CachedTokenProviderOptions.ExpireEarly -// option. +// a few minutes before they expire. func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp @@ -321,7 +321,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. c.mu.Lock() defer c.mu.Unlock() @@ -336,13 +338,14 @@ func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() t := c.cachedToken + now := timeNow() if t == nil || t.Value == "" { return invalid } else if t.Expiry.IsZero() { return fresh - } else if timeNow().After(t.Expiry.Round(0)) { + } else if now.After(t.Expiry.Round(0)) { return invalid - } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { return stale } return fresh diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cce622418..010afc37c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -98,8 +98,8 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index b426e16d2..6591b1811 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, 
quotaProjectID, universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. 
+ opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06f..d8b5d4fde 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -94,32 +94,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d..6ae29de6c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index efc91c2b0..8696df148 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { if tp == nil { return false } @@ -66,6 +66,9 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } @@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 0442a5938..42d4cbe30 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. 
package grpctransport import ( @@ -20,15 +22,19 @@ import ( "errors" "fmt" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -38,7 +44,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -46,6 +52,27 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + // ClientCertProvider is a function that returns a TLS client certificate to be // used when opening TLS connections. It follows the same semantics as // [crypto/tls.Config.GetClientCertificate]. @@ -271,7 +298,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -289,9 +319,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.NewClient(endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. @@ -325,15 +356,23 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". 
+// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { @@ -384,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt } return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } + +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d2..38e8c9939 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. package httptransport import ( @@ -153,6 +155,8 @@ type InternalOptions struct { // transport that sets the Authorization header with the value produced by the // provided [cloud.google.com/go/auth.Credentials]. An error is returned only // if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { if client == nil || creds == nil { return fmt.Errorf("httptransport: client and tp must not be nil") @@ -171,7 +175,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er client.Transport = &authTransport{ creds: creds, base: base, - // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. } return nil } diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 07eea4744..63498ee79 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -27,11 +28,12 @@ import ( "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). 
+ trans = addOpenTelemetryTransport(trans, opts) trans = addOCTransport(trans, opts) switch { case opts.DisableAuthentication: @@ -76,7 +81,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ @@ -94,7 +102,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans @@ -171,13 +190,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 000000000..651bd61fb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. +func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 000000000..af490bf4f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 000000000..d92178df8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 000000000..16be9df30 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345ed..d8c161191 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,8 +38,11 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. 
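The `UniverseDomainEnvVar` constant added in the hunk above, together with the `getClientUniverseDomain` changes to `grpctransport` and `httptransport` earlier in this diff (and the v0.9.5 changelog entry "Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env"), establishes a single precedence order for the universe domain: an explicit client option wins, then the `GOOGLE_CLOUD_UNIVERSE_DOMAIN` environment variable, then the `googleapis.com` default. The standalone sketch below only illustrates that precedence; `resolveClientUniverseDomain` and its constants are hypothetical names for this example and are not part of the vendored package.

```go
package main

import (
	"fmt"
	"os"
)

// These mirror the vendored constants for illustration only.
const (
	universeDomainEnvVar  = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
	defaultUniverseDomain = "googleapis.com"
)

// resolveClientUniverseDomain is a hypothetical helper that follows the same
// precedence as the getClientUniverseDomain methods in grpctransport and
// httptransport: explicit client option, then environment variable, then default.
func resolveClientUniverseDomain(clientOption string) string {
	if clientOption != "" {
		return clientOption
	}
	if envUD := os.Getenv(universeDomainEnvVar); envUD != "" {
		return envUD
	}
	return defaultUniverseDomain
}

func main() {
	// With no option and no env var set, the default domain is used.
	fmt.Println(resolveClientUniverseDomain("")) // "googleapis.com"

	// The environment variable overrides the default.
	os.Setenv(universeDomainEnvVar, "example-universe.goog")
	fmt.Println(resolveClientUniverseDomain("")) // "example-universe.goog"

	// An explicit client option always wins.
	fmt.Println(resolveClientUniverseDomain("override.goog")) // "override.goog"
}
```

Under this reading of the diff, the environment variable only takes effect when no universe domain has been set explicitly on the client, which is then validated against the universe domain carried by the credentials.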
@@ -197,7 +200,7 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe_domain") + return metadata.GetWithContext(ctx, "universe/universe-domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 26e037c1a..f606888f1 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -133,7 +133,11 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return defaultTransportCreds, config.endpoint, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress @@ -177,7 +181,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return config.clientCertSource, nil, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 366515916..6c954ae19 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba28..738cb2161 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf82..347aaced7 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 4df73edce..37894bfcd 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,6 +15,7 @@ package transport import ( + "context" "encoding/json" "fmt" "log" @@ -84,7 +85,7 @@ func getMetadataMTLSAutoConfig() { } var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) + return metadata.GetWithContext(context.Background(), configEndpointSuffix) } func queryConfig() (*mtlsConfig, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b171..cc586ec5b 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -81,12 +81,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. 
+func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index 7faf6e0c9..3df9f72ca 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + ## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac571..e266b0c9b 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. + metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,24 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + metadata := tok.Metadata + if metadata != nil { + // Append compute token metadata in converted form. 
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e27..da7db19b1 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b72..c160b4786 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f891..2e53f0123 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 133ff6855..8644f614c 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -79,12 +79,15 @@ are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: ctx := context.Background() - // https://pkg.go.dev/golang.org/x/oauth2/google - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) 
+ // https://pkg.go.dev/cloud.google.com/go/auth/credentials + creds, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: secretmanager.DefaultAuthScopes(), + CredentialsJSON: []byte("JSON creds") + }), secretmanager.DefaultAuthScopes()...) if err != nil { // TODO: handle error. } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) + client, err := secretmanager.NewClient(ctx, option.WithAuthCredentials(creds)) if err != nil { // TODO: handle error. } diff --git a/vendor/cloud.google.com/go/firestore/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/firestore/apiv1/auxiliary_go123.go new file mode 100644 index 000000000..9b9a20e0c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1/auxiliary_go123.go @@ -0,0 +1,51 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package firestore + +import ( + "iter" + + firestorepb "cloud.google.com/go/firestore/apiv1/firestorepb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CursorIterator) All() iter.Seq2[*firestorepb.Cursor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *DocumentIterator) All() iter.Seq2[*firestorepb.Document, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationIterator) All() iter.Seq2[*longrunningpb.Operation, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *StringIterator) All() iter.Seq2[string, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1/firestore_client.go b/vendor/cloud.google.com/go/firestore/apiv1/firestore_client.go index 4c1aaaf9e..97a15e007 100644 --- a/vendor/cloud.google.com/go/firestore/apiv1/firestore_client.go +++ b/vendor/cloud.google.com/go/firestore/apiv1/firestore_client.go @@ -78,6 +78,7 @@ func defaultGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://firestore.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -853,6 +854,7 @@ func defaultRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://firestore.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -1347,11 +1349,11 @@ func (c *restClient) GetDocument(ctx context.Context, req *firestorepb.GetDocume } } if req.GetReadTime() != nil { - readTime, err := protojson.Marshal(req.GetReadTime()) + field, err := protojson.Marshal(req.GetReadTime()) if err != nil { return nil, err } - params.Add("readTime", string(readTime[1:len(readTime)-1])) + params.Add("readTime", string(field[1:len(field)-1])) } if req.GetTransaction() != nil { params.Add("transaction", fmt.Sprintf("%v", req.GetTransaction())) @@ -1444,11 +1446,11 @@ func (c *restClient) ListDocuments(ctx context.Context, req *firestorepb.ListDoc params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) } if req.GetReadTime() != nil { - readTime, err := protojson.Marshal(req.GetReadTime()) + field, err := protojson.Marshal(req.GetReadTime()) if err != nil { return nil, "", err } - params.Add("readTime", string(readTime[1:len(readTime)-1])) + params.Add("readTime", string(field[1:len(field)-1])) } if req.GetShowMissing() { params.Add("showMissing", fmt.Sprintf("%v", req.GetShowMissing())) @@ -1537,11 +1539,11 @@ func (c *restClient) UpdateDocument(ctx context.Context, req *firestorepb.Update params.Add("currentDocument.exists", fmt.Sprintf("%v", req.GetCurrentDocument().GetExists())) } if req.GetCurrentDocument().GetUpdateTime() != nil { - updateTime, err := protojson.Marshal(req.GetCurrentDocument().GetUpdateTime()) + field, err := protojson.Marshal(req.GetCurrentDocument().GetUpdateTime()) if err != nil { return nil, err } - params.Add("currentDocument.updateTime", string(updateTime[1:len(updateTime)-1])) + params.Add("currentDocument.updateTime", string(field[1:len(field)-1])) } if items := req.GetMask().GetFieldPaths(); len(items) > 0 { for _, item := range items { @@ -1617,11 +1619,11 @@ func (c *restClient) DeleteDocument(ctx context.Context, req *firestorepb.Delete params.Add("currentDocument.exists", fmt.Sprintf("%v", req.GetCurrentDocument().GetExists())) } if req.GetCurrentDocument().GetUpdateTime() != nil { - updateTime, err := protojson.Marshal(req.GetCurrentDocument().GetUpdateTime()) + field, err := protojson.Marshal(req.GetCurrentDocument().GetUpdateTime()) if err != nil { return err } - params.Add("currentDocument.updateTime", string(updateTime[1:len(updateTime)-1])) + params.Add("currentDocument.updateTime", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git 
a/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/document.pb.go b/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/document.pb.go index f12491fb5..b36f1b1b7 100644 --- a/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/document.pb.go +++ b/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/document.pb.go @@ -24,6 +24,7 @@ import ( reflect "reflect" sync "sync" + _ "google.golang.org/genproto/googleapis/api/annotations" latlng "google.golang.org/genproto/googleapis/type/latlng" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -351,7 +352,7 @@ type Value_GeoPointValue struct { type Value_ArrayValue struct { // An array value. // - // Cannot directly contain another array value, though can contain an + // Cannot directly contain another array value, though can contain a // map which contains another array. ArrayValue *ArrayValue `protobuf:"bytes,9,opt,name=array_value,json=arrayValue,proto3,oneof"` } @@ -492,96 +493,98 @@ var file_google_firestore_v1_document_proto_rawDesc = []byte{ 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6c, 0x61, 0x74, 0x6c, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6c, 0x61, 0x74, 0x6c, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 
0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x1a, 0x55, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x04, 0x0a, 0x05, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x25, 0x0a, 0x0d, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, + 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, + 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x1a, 0x55, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x04, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, - 0x0a, 0x0d, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0c, - 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, - 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x29, 0x0a, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0f, 0x67, - 0x65, 0x6f, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x4c, 0x61, 0x74, 0x4c, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x65, 0x6f, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x0b, 0x61, 0x72, - 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, - 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x00, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0a, 0x41, 0x72, - 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xa4, 0x01, 0x0a, - 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x29, 0x0a, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0f, + 0x67, 0x65, 0x6f, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x4c, 0x61, 0x74, 0x4c, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x65, + 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x0b, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x3c, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, + 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 
0x0a, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x55, 0x0a, 0x0b, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x42, 0xc5, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, - 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, - 0x62, 0x3b, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, 0x62, 0xa2, 0x02, 0x04, - 0x47, 0x43, 0x46, 0x53, 0xaa, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, - 0xca, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1c, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x46, 0x69, - 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xa4, 0x01, + 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x55, 0x0a, + 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0xc5, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x31, + 0x42, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x70, 0x62, 0x3b, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, 0x62, 0xa2, 0x02, + 0x04, 0x47, 0x43, 0x46, 0x53, 0xaa, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, + 0x31, 0xca, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x5c, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1c, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x46, + 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/query.pb.go b/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/query.pb.go index 5c99a7c93..98b169190 100644 --- a/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/query.pb.go +++ b/vendor/cloud.google.com/go/firestore/apiv1/firestorepb/query.pb.go @@ -349,18 +349,21 @@ const ( StructuredQuery_FindNearest_DISTANCE_MEASURE_UNSPECIFIED StructuredQuery_FindNearest_DistanceMeasure = 0 // Measures the EUCLIDEAN distance between the vectors. See // [Euclidean](https://en.wikipedia.org/wiki/Euclidean_distance) to learn - // more + // more. The resulting distance decreases the more similar two vectors + // are. StructuredQuery_FindNearest_EUCLIDEAN StructuredQuery_FindNearest_DistanceMeasure = 1 - // Compares vectors based on the angle between them, which allows you to - // measure similarity that isn't based on the vectors magnitude. - // We recommend using DOT_PRODUCT with unit normalized vectors instead of - // COSINE distance, which is mathematically equivalent with better - // performance. See [Cosine + // COSINE distance compares vectors based on the angle between them, which + // allows you to measure similarity that isn't based on the vectors + // magnitude. We recommend using DOT_PRODUCT with unit normalized vectors + // instead of COSINE distance, which is mathematically equivalent with + // better performance. See [Cosine // Similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to learn - // more. + // more about COSINE similarity and COSINE distance. The resulting + // COSINE distance decreases the more similar two vectors are. StructuredQuery_FindNearest_COSINE StructuredQuery_FindNearest_DistanceMeasure = 2 // Similar to cosine but is affected by the magnitude of the vectors. See // [Dot Product](https://en.wikipedia.org/wiki/Dot_product) to learn more. + // The resulting distance increases the more similar two vectors are. StructuredQuery_FindNearest_DOT_PRODUCT StructuredQuery_FindNearest_DistanceMeasure = 3 ) @@ -511,7 +514,7 @@ type StructuredQuery struct { // // * The value must be greater than or equal to zero if specified. Limit *wrapperspb.Int32Value `protobuf:"bytes,5,opt,name=limit,proto3" json:"limit,omitempty"` - // Optional. A potential Nearest Neighbors Search. + // Optional. A potential nearest neighbors search. // // Applies after all other filters and ordering. 
// @@ -1298,7 +1301,10 @@ func (x *StructuredQuery_Projection) GetFields() []*StructuredQuery_FieldReferen return nil } -// Nearest Neighbors search config. +// Nearest Neighbors search config. The ordering provided by FindNearest +// supersedes the order_by stage. If multiple documents have the same vector +// distance, the returned document order is not guaranteed to be stable +// between queries. type StructuredQuery_FindNearest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1311,11 +1317,24 @@ type StructuredQuery_FindNearest struct { // Required. The query vector that we are searching on. Must be a vector of // no more than 2048 dimensions. QueryVector *Value `protobuf:"bytes,2,opt,name=query_vector,json=queryVector,proto3" json:"query_vector,omitempty"` - // Required. The Distance Measure to use, required. + // Required. The distance measure to use, required. DistanceMeasure StructuredQuery_FindNearest_DistanceMeasure `protobuf:"varint,3,opt,name=distance_measure,json=distanceMeasure,proto3,enum=google.firestore.v1.StructuredQuery_FindNearest_DistanceMeasure" json:"distance_measure,omitempty"` // Required. The number of nearest neighbors to return. Must be a positive // integer of no more than 1000. Limit *wrapperspb.Int32Value `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"` + // Optional. Optional name of the field to output the result of the vector + // distance calculation. Must conform to [document field + // name][google.firestore.v1.Document.fields] limitations. + DistanceResultField string `protobuf:"bytes,5,opt,name=distance_result_field,json=distanceResultField,proto3" json:"distance_result_field,omitempty"` + // Optional. Option to specify a threshold for which no less similar + // documents will be returned. The behavior of the specified + // `distance_measure` will affect the meaning of the distance threshold. + // Since DOT_PRODUCT distances increase when the vectors are more similar, + // the comparison is inverted. + // + // For EUCLIDEAN, COSINE: WHERE distance <= distance_threshold + // For DOT_PRODUCT: WHERE distance >= distance_threshold + DistanceThreshold *wrapperspb.DoubleValue `protobuf:"bytes,6,opt,name=distance_threshold,json=distanceThreshold,proto3" json:"distance_threshold,omitempty"` } func (x *StructuredQuery_FindNearest) Reset() { @@ -1378,6 +1397,20 @@ func (x *StructuredQuery_FindNearest) GetLimit() *wrapperspb.Int32Value { return nil } +func (x *StructuredQuery_FindNearest) GetDistanceResultField() string { + if x != nil { + return x.DistanceResultField + } + return "" +} + +func (x *StructuredQuery_FindNearest) GetDistanceThreshold() *wrapperspb.DoubleValue { + if x != nil { + return x.DistanceThreshold + } + return nil +} + // Defines an aggregation that produces a single result. 
type StructuredAggregationQuery_Aggregation struct { state protoimpl.MessageState @@ -1741,7 +1774,7 @@ var file_google_firestore_v1_query_proto_rawDesc = []byte{ 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x15, 0x0a, 0x0f, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb4, 0x16, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x47, 0x0a, 0x06, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, @@ -1880,7 +1913,7 @@ var file_google_firestore_v1_query_proto_rawDesc = []byte{ 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x1a, 0xb9, 0x03, 0x0a, 0x0b, 0x46, 0x69, 0x6e, 0x64, 0x4e, 0x65, 0x61, + 0x65, 0x6c, 0x64, 0x73, 0x1a, 0xc4, 0x04, 0x0a, 0x0b, 0x46, 0x69, 0x6e, 0x64, 0x4e, 0x65, 0x61, 0x72, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x0c, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, @@ -1901,84 +1934,93 @@ var file_google_firestore_v1_query_proto_rawDesc = []byte{ 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, - 0x5f, 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x61, 0x73, 0x75, - 0x72, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x49, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, - 0x45, 0x41, 0x53, 0x55, 0x52, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x45, 0x55, 0x43, 0x4c, 0x49, 0x44, 0x45, 0x41, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x53, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, - 0x0f, 0x0a, 0x0b, 0x44, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x54, 0x10, 0x03, - 0x22, 0x45, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, - 0x15, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x53, 0x43, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x53, 0x43, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x83, 0x06, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x51, 0x0a, 0x10, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x75, 0x72, 0x65, 
0x64, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, - 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x0c, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, - 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, - 0x9d, 0x04, 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x59, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x03, 0x73, 0x75, - 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x37, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x13, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x50, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x22, 0x5f, 0x0a, 0x0f, 0x44, 0x69, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x12, 0x20, 0x0a, + 0x1c, 0x44, 0x49, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x53, 0x55, 0x52, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x45, 0x55, 0x43, 0x4c, 0x49, 0x44, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x43, 0x4f, 0x53, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x4f, + 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x54, 0x10, 0x03, 0x22, 0x45, 0x0a, 0x09, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, + 0x10, 0x02, 0x22, 0x83, 0x06, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, + 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x51, 0x0a, 0x10, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x5f, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x9d, 0x04, 0x0a, 0x0b, 0x41, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x05, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, + 0x72, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x53, 0x0a, 0x03, 0x61, 0x76, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, - 0x53, 0x0a, 0x03, 0x61, 0x76, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 
0x64, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x76, 0x67, 0x48, 0x00, 0x52, - 0x03, 0x61, 0x76, 0x67, 0x12, 0x19, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x1a, - 0x3e, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x05, 0x75, 0x70, 0x5f, 0x74, - 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x75, 0x70, 0x54, 0x6f, 0x1a, - 0x50, 0x0a, 0x03, 0x53, 0x75, 0x6d, 0x12, 0x49, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, - 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x1a, 0x50, 0x0a, 0x03, 0x41, 0x76, 0x67, 0x12, 0x49, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x42, 0x0a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, - 0x0c, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x54, 0x0a, - 0x06, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x62, 0x65, 0x66, - 0x6f, 0x72, 0x65, 0x42, 0xc2, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, - 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x67, 0x6f, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, - 0x76, 0x31, 0x2f, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, 0x62, 0x3b, 0x66, - 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x43, 0x46, - 0x53, 0xaa, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x19, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x46, 0x69, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1c, 0x47, 0x6f, 0x6f, 0x67, - 
0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x46, 0x69, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x76, 0x67, 0x48, 0x00, 0x52, 0x03, 0x61, 0x76, 0x67, 0x12, + 0x19, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x1a, 0x3e, 0x0a, 0x05, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x05, 0x75, 0x70, 0x5f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x75, 0x70, 0x54, 0x6f, 0x1a, 0x50, 0x0a, 0x03, 0x53, 0x75, + 0x6d, 0x12, 0x49, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, + 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x50, 0x0a, 0x03, + 0x41, 0x76, 0x67, 0x12, 0x49, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, + 0x72, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x0a, + 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x0c, 0x0a, 0x0a, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x54, 0x0a, 0x06, 0x43, 0x75, 0x72, 0x73, + 0x6f, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x42, 0xc2, + 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x66, 0x69, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x69, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x66, 0x69, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x70, 0x62, 0x3b, 0x66, 0x69, 0x72, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x43, 0x46, 0x53, 0xaa, 0x02, 0x19, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x46, 0x69, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5c, 0x56, 0x31, 
0xea, 0x02, 0x1c, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x46, 0x69, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, + 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2019,7 +2061,8 @@ var file_google_firestore_v1_query_proto_goTypes = []any{ (*StructuredAggregationQuery_Aggregation_Avg)(nil), // 20: google.firestore.v1.StructuredAggregationQuery.Aggregation.Avg (*wrapperspb.Int32Value)(nil), // 21: google.protobuf.Int32Value (*Value)(nil), // 22: google.firestore.v1.Value - (*wrapperspb.Int64Value)(nil), // 23: google.protobuf.Int64Value + (*wrapperspb.DoubleValue)(nil), // 23: google.protobuf.DoubleValue + (*wrapperspb.Int64Value)(nil), // 24: google.protobuf.Int64Value } var file_google_firestore_v1_query_proto_depIdxs = []int32{ 15, // 0: google.firestore.v1.StructuredQuery.select:type_name -> google.firestore.v1.StructuredQuery.Projection @@ -2050,17 +2093,18 @@ var file_google_firestore_v1_query_proto_depIdxs = []int32{ 22, // 25: google.firestore.v1.StructuredQuery.FindNearest.query_vector:type_name -> google.firestore.v1.Value 4, // 26: google.firestore.v1.StructuredQuery.FindNearest.distance_measure:type_name -> google.firestore.v1.StructuredQuery.FindNearest.DistanceMeasure 21, // 27: google.firestore.v1.StructuredQuery.FindNearest.limit:type_name -> google.protobuf.Int32Value - 18, // 28: google.firestore.v1.StructuredAggregationQuery.Aggregation.count:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Count - 19, // 29: google.firestore.v1.StructuredAggregationQuery.Aggregation.sum:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Sum - 20, // 30: google.firestore.v1.StructuredAggregationQuery.Aggregation.avg:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Avg - 23, // 31: google.firestore.v1.StructuredAggregationQuery.Aggregation.Count.up_to:type_name -> google.protobuf.Int64Value - 14, // 32: google.firestore.v1.StructuredAggregationQuery.Aggregation.Sum.field:type_name -> google.firestore.v1.StructuredQuery.FieldReference - 14, // 33: google.firestore.v1.StructuredAggregationQuery.Aggregation.Avg.field:type_name -> google.firestore.v1.StructuredQuery.FieldReference - 34, // [34:34] is the sub-list for method output_type - 34, // [34:34] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 23, // 28: google.firestore.v1.StructuredQuery.FindNearest.distance_threshold:type_name -> google.protobuf.DoubleValue + 18, // 29: google.firestore.v1.StructuredAggregationQuery.Aggregation.count:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Count + 19, // 30: google.firestore.v1.StructuredAggregationQuery.Aggregation.sum:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Sum + 20, // 31: google.firestore.v1.StructuredAggregationQuery.Aggregation.avg:type_name -> google.firestore.v1.StructuredAggregationQuery.Aggregation.Avg + 24, // 32: google.firestore.v1.StructuredAggregationQuery.Aggregation.Count.up_to:type_name -> google.protobuf.Int64Value + 14, // 33: google.firestore.v1.StructuredAggregationQuery.Aggregation.Sum.field:type_name -> google.firestore.v1.StructuredQuery.FieldReference + 14, // 34: google.firestore.v1.StructuredAggregationQuery.Aggregation.Avg.field:type_name -> google.firestore.v1.StructuredQuery.FieldReference + 35, // 
[35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 35, // [35:35] is the sub-list for extension extendee + 0, // [0:35] is the sub-list for field type_name } func init() { file_google_firestore_v1_query_proto_init() } diff --git a/vendor/cloud.google.com/go/firestore/internal/version.go b/vendor/cloud.google.com/go/firestore/internal/version.go index b82e17a19..6607c695d 100644 --- a/vendor/cloud.google.com/go/firestore/internal/version.go +++ b/vendor/cloud.google.com/go/firestore/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.16.0" +const Version = "1.17.0" diff --git a/vendor/cloud.google.com/go/go.work b/vendor/cloud.google.com/go/go.work index 8f27dc5b1..122a980e6 100644 --- a/vendor/cloud.google.com/go/go.work +++ b/vendor/cloud.google.com/go/go.work @@ -1,4 +1,6 @@ -go 1.20 +go 1.21.13 + +toolchain go1.23.0 use ( . @@ -12,6 +14,7 @@ use ( ./apigateway ./apigeeconnect ./apigeeregistry + ./apihub ./apikeys ./appengine ./apphub diff --git a/vendor/cloud.google.com/go/go.work.sum b/vendor/cloud.google.com/go/go.work.sum index 9c215e269..ed5eb16ae 100644 --- a/vendor/cloud.google.com/go/go.work.sum +++ b/vendor/cloud.google.com/go/go.work.sum @@ -4,12 +4,10 @@ cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3Y cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.46.0/go.mod h1:V28hx+cUCZC9e3qcqszMb+Sbt8cQZtHTiXOmyDzoDOg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4= @@ -29,14 +27,11 @@ github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0V github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E= -github.com/gliderlabs/ssh v0.3.7/go.mod 
h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= -github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0/go.mod h1:pMYMxVaKJqCDC1JUg/XbPJ4/fSazB25zORpFzqsIGIc= @@ -55,14 +50,12 @@ github.com/miekg/dns v1.1.33/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw= +go.opentelemetry.io/contrib/detectors/gcp v1.27.0/go.mod h1:amd+4uZxqJAUx7zI1JvygUtAc2EVWtQeyz8D+3161SQ= go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= go.opentelemetry.io/otel/bridge/opencensus v0.40.0 h1:pqDiayRhBgoqy1vwnscik+TizcImJ58l053NScJyZso= go.opentelemetry.io/otel/bridge/opencensus v0.40.0/go.mod h1:1NvVHb6tLTe5A9qCYz+eErW0t8iPn4ZfR6tDKcqlGTM= @@ -70,6 +63,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= @@ -79,8 +74,6 @@ golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= google.golang.org/api 
v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg= -google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= -google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= google.golang.org/genproto v0.0.0-20230725213213-b022f6e96895/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto/googleapis/api v0.0.0-20230725213213-b022f6e96895/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 63d8364fc..498a15a5f 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,20 @@ # Changes +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) + + +### Features + +* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + ## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08) diff --git a/vendor/cloud.google.com/go/iam/credentials/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/iam/credentials/apiv1/auxiliary_go123.go new file mode 100644 index 000000000..159da92cb --- /dev/null +++ b/vendor/cloud.google.com/go/iam/credentials/apiv1/auxiliary_go123.go @@ -0,0 +1,19 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +//go:build go1.23 + +package credentials diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 54acae7cd..6b58b6a6f 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -179,6 +179,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/apihub/apiv1": { + "api_shortname": "apihub", + "distribution_name": "cloud.google.com/go/apihub/apiv1", + "description": "API hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apihub/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/apikeys/apiv2": { "api_shortname": "apikeys", "distribution_name": "cloud.google.com/go/apikeys/apiv2", @@ -559,6 +569,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/bigquery/storage/apiv1alpha": { + "api_shortname": "bigquerystorage", + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha", + "description": "BigQuery Storage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", @@ -1009,6 +1029,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/datastore/apiv1": { + "api_shortname": "datastore", + "distribution_name": "cloud.google.com/go/datastore/apiv1", + "description": "Cloud Datastore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/datastream/apiv1": { "api_shortname": "datastream", "distribution_name": "cloud.google.com/go/datastream/apiv1", @@ -1329,6 +1359,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/gkeconnect/gateway/apiv1": { + "api_shortname": "connectgateway", + "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1", + "description": "Connect Gateway API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { "api_shortname": "connectgateway", "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", @@ -1352,7 +1392,7 @@ "cloud.google.com/go/gkemulticloud/apiv1": { "api_shortname": "gkemulticloud", "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", - "description": "Anthos Multi-Cloud API", + "description": "GKE Multi-Cloud API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", @@ -1552,7 +1592,7 @@ "cloud.google.com/go/managedkafka/apiv1": { "api_shortname": "managedkafka", "distribution_name": 
"cloud.google.com/go/managedkafka/apiv1", - "description": "Apache Kafka for BigQuery API", + "description": "Managed Service for Apache Kafka API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", @@ -1569,6 +1609,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/areainsights/apiv1": { + "api_shortname": "areainsights", + "distribution_name": "cloud.google.com/go/maps/areainsights/apiv1", + "description": "Places Insights API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/areainsights/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/fleetengine/apiv1": { "api_shortname": "fleetengine", "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go index fa01cf3dd..16440fbc7 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go @@ -189,7 +189,8 @@ type internalAutokeyAdminClient interface { // AutokeyAdminClient is a client for interacting with Cloud Key Management Service (KMS) API. // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // -// Provides interfaces for managing Cloud KMS Autokey folder-level +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level // configurations. A configuration is inherited by all descendent projects. A // configuration at one folder overrides any other configurations in its // ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS @@ -316,7 +317,8 @@ type autokeyAdminGRPCClient struct { // NewAutokeyAdminClient creates a new autokey admin client based on gRPC. // The returned client must be Closed when it is done being used to clean up its underlying connections. // -// Provides interfaces for managing Cloud KMS Autokey folder-level +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level // configurations. A configuration is inherited by all descendent projects. A // configuration at one folder overrides any other configurations in its // ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS @@ -396,7 +398,8 @@ type autokeyAdminRESTClient struct { // NewAutokeyAdminRESTClient creates a new autokey admin rest client. // -// Provides interfaces for managing Cloud KMS Autokey folder-level +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level // configurations. A configuration is inherited by all descendent projects. A // configuration at one folder overrides any other configurations in its // ancestry. 
Setting a configuration on a folder is a prerequisite for Cloud KMS @@ -672,11 +675,11 @@ func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *k params := url.Values{} params.Add("$alt", "json;enum-encoding=int") if req.GetUpdateMask() != nil { - updateMask, err := protojson.Marshal(req.GetUpdateMask()) + field, err := protojson.Marshal(req.GetUpdateMask()) if err != nil { return nil, err } - params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + params.Add("updateMask", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go index c11e3ad22..0cd015059 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go @@ -161,7 +161,7 @@ type internalAutokeyClient interface { CreateKeyHandle(context.Context, *kmspb.CreateKeyHandleRequest, ...gax.CallOption) (*CreateKeyHandleOperation, error) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation GetKeyHandle(context.Context, *kmspb.GetKeyHandleRequest, ...gax.CallOption) (*kmspb.KeyHandle, error) - ListKeyHandles(context.Context, *kmspb.ListKeyHandlesRequest, ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) + ListKeyHandles(context.Context, *kmspb.ListKeyHandlesRequest, ...gax.CallOption) *KeyHandleIterator GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) @@ -173,7 +173,8 @@ type internalAutokeyClient interface { // AutokeyClient is a client for interacting with Cloud Key Management Service (KMS) API. // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // -// Provides interfaces for using Cloud KMS Autokey to provision new +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new // CryptoKeys, ready for Customer Managed // Encryption Key (CMEK) use, on-demand. To support certain client tooling, this // feature is modeled around a KeyHandle @@ -249,7 +250,7 @@ func (c *AutokeyClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandl } // ListKeyHandles lists KeyHandles. -func (c *AutokeyClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) { +func (c *AutokeyClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { return c.internalClient.ListKeyHandles(ctx, req, opts...) } @@ -325,7 +326,8 @@ type autokeyGRPCClient struct { // NewAutokeyClient creates a new autokey client based on gRPC. // The returned client must be Closed when it is done being used to clean up its underlying connections. // -// Provides interfaces for using Cloud KMS Autokey to provision new +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new // CryptoKeys, ready for Customer Managed // Encryption Key (CMEK) use, on-demand. 
To support certain client tooling, this // feature is modeled around a KeyHandle @@ -431,7 +433,8 @@ type autokeyRESTClient struct { // NewAutokeyRESTClient creates a new autokey rest client. // -// Provides interfaces for using Cloud KMS Autokey to provision new +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new // CryptoKeys, ready for Customer Managed // Encryption Key (CMEK) use, on-demand. To support certain client tooling, this // feature is modeled around a KeyHandle @@ -551,22 +554,50 @@ func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH return resp, nil } -func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) { +func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} hds = append(c.xGoogHeaders, hds...) ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...) - var resp *kmspb.ListKeyHandlesResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err + it := &KeyHandleIterator{} + req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) { + resp := &kmspb.ListKeyHandlesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetKeyHandles(), resp.GetNextPageToken(), nil } - return resp, nil + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it } func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { @@ -846,66 +877,95 @@ func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH } // ListKeyHandles lists KeyHandles. 
-func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) { - baseUrl, err := url.Parse(c.endpoint) - if err != nil { - return nil, err - } - baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent()) - - params := url.Values{} - params.Add("$alt", "json;enum-encoding=int") - if req.GetFilter() != "" { - params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) - } - - baseUrl.RawQuery = params.Encode() - - // Build HTTP headers from client and context metadata. - hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} - - hds = append(c.xGoogHeaders, hds...) - hds = append(hds, "Content-Type", "application/json") - headers := gax.BuildHeaders(ctx, hds...) - opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...) +func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { + it := &KeyHandleIterator{} + req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest) unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} - resp := &kmspb.ListKeyHandlesResponse{} - e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - if settings.Path != "" { - baseUrl.Path = settings.Path + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) { + resp := &kmspb.ListKeyHandlesResponse{} + if pageToken != "" { + req.PageToken = pageToken } - httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) if err != nil { - return err + return nil, "", err } - httpReq = httpReq.WithContext(ctx) - httpReq.Header = headers + baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent()) - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) } - defer httpRsp.Body.Close() - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) 
+ if e != nil { + return nil, "", e } + it.Response = resp + return resp.GetKeyHandles(), resp.GetNextPageToken(), nil + } - buf, err := io.ReadAll(httpRsp.Body) + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { - return err + return "", err } + it.items = append(it.items, items...) + return nextPageToken, nil + } - if err := unm.Unmarshal(buf, resp); err != nil { - return err - } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() - return nil - }, opts...) - if e != nil { - return nil, e - } - return resp, nil + return it } // GetLocation gets information about a location. diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go index 0ca75b22f..fa594ed81 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go +++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go @@ -279,6 +279,53 @@ func (it *ImportJobIterator) takeBuf() interface{} { return b } +// KeyHandleIterator manages a stream of *kmspb.KeyHandle. +type KeyHandleIterator struct { + items []*kmspb.KeyHandle + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyHandle, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *KeyHandleIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *KeyHandleIterator) Next() (*kmspb.KeyHandle, error) { + var item *kmspb.KeyHandle + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *KeyHandleIterator) bufLen() int { + return len(it.items) +} + +func (it *KeyHandleIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + // KeyRingIterator manages a stream of *kmspb.KeyRing. type KeyRingIterator struct { items []*kmspb.KeyRing diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go new file mode 100644 index 000000000..7a8043ee9 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go @@ -0,0 +1,69 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package kms + +import ( + "iter" + + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + "github.com/googleapis/gax-go/v2/iterator" + locationpb "google.golang.org/genproto/googleapis/cloud/location" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CryptoKeyIterator) All() iter.Seq2[*kmspb.CryptoKey, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CryptoKeyVersionIterator) All() iter.Seq2[*kmspb.CryptoKeyVersion, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *EkmConnectionIterator) All() iter.Seq2[*kmspb.EkmConnection, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ImportJobIterator) All() iter.Seq2[*kmspb.ImportJob, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *KeyHandleIterator) All() iter.Seq2[*kmspb.KeyHandle, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *KeyRingIterator) All() iter.Seq2[*kmspb.KeyRing, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
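The new auxiliary_go123.go file above (build-tagged go1.23) adds All() range-over-func adapters for each iterator type, including the new KeyHandleIterator. Under Go 1.23 and later, the Next loop can be written as a range statement; this sketch reuses the same hypothetical client and parent as the previous example.

//go:build go1.23

package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
)

// rangeKeyHandles is an illustrative helper (not part of this diff).
func rangeKeyHandles(ctx context.Context, client *kms.AutokeyClient, parent string) error {
	it := client.ListKeyHandles(ctx, &kmspb.ListKeyHandlesRequest{Parent: parent})
	// All yields (value, error) pairs; per the generated doc comment, the
	// sequence stops after the first iteration that yields a non-nil error.
	for kh, err := range it.All() {
		if err != nil {
			return err
		}
		log.Printf("key handle: %s", kh.GetName())
	}
	return nil
}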
+func (it *LocationIterator) All() iter.Seq2[*locationpb.Location, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go index 6d1856c9e..411a37084 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go @@ -1048,11 +1048,11 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda params := url.Values{} params.Add("$alt", "json;enum-encoding=int") if req.GetUpdateMask() != nil { - updateMask, err := protojson.Marshal(req.GetUpdateMask()) + field, err := protojson.Marshal(req.GetUpdateMask()) if err != nil { return nil, err } - params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + params.Add("updateMask", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() @@ -1184,11 +1184,11 @@ func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk params := url.Values{} params.Add("$alt", "json;enum-encoding=int") if req.GetUpdateMask() != nil { - updateMask, err := protojson.Marshal(req.GetUpdateMask()) + field, err := protojson.Marshal(req.GetUpdateMask()) if err != nil { return nil, err } - params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + params.Add("updateMask", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go index 7b3492e91..285df9b61 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -3119,11 +3119,11 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp params := url.Values{} params.Add("$alt", "json;enum-encoding=int") if req.GetUpdateMask() != nil { - updateMask, err := protojson.Marshal(req.GetUpdateMask()) + field, err := protojson.Marshal(req.GetUpdateMask()) if err != nil { return nil, err } - params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + params.Add("updateMask", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() @@ -3204,11 +3204,11 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re params := url.Values{} params.Add("$alt", "json;enum-encoding=int") if req.GetUpdateMask() != nil { - updateMask, err := protojson.Marshal(req.GetUpdateMask()) + field, err := protojson.Marshal(req.GetUpdateMask()) if err != nil { return nil, err } - params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + params.Add("updateMask", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go index 130d16385..4b2a60c2c 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go @@ -299,6 +299,18 @@ type ListKeyHandlesRequest struct { // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g. // `projects/{PROJECT_ID}/locations/{LOCATION}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [KeyHandles][google.cloud.kms.v1.KeyHandle] to include in the response. The + // service may return fewer than this value. 
Further + // [KeyHandles][google.cloud.kms.v1.KeyHandle] can subsequently be obtained by + // including the + // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token] + // in a subsequent request. If unspecified, at most + // 100 [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` // Optional. Filter to apply when listing // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g. // `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`. @@ -344,6 +356,20 @@ func (x *ListKeyHandlesRequest) GetParent() string { return "" } +func (x *ListKeyHandlesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListKeyHandlesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + func (x *ListKeyHandlesRequest) GetFilter() string { if x != nil { return x.Filter @@ -360,6 +386,10 @@ type ListKeyHandlesResponse struct { // Resulting [KeyHandles][google.cloud.kms.v1.KeyHandle]. KeyHandles []*KeyHandle `protobuf:"bytes,1,rep,name=key_handles,json=keyHandles,proto3" json:"key_handles,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListKeyHandlesRequest.page_token][google.cloud.kms.v1.ListKeyHandlesRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` } func (x *ListKeyHandlesResponse) Reset() { @@ -401,6 +431,13 @@ func (x *ListKeyHandlesResponse) GetKeyHandles() []*KeyHandle { return nil } +func (x *ListKeyHandlesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + var File_google_cloud_kms_v1_autokey_proto protoreflect.FileDescriptor var file_google_cloud_kms_v1_autokey_proto_rawDesc = []byte{ @@ -455,20 +492,27 @@ var file_google_cloud_kms_v1_autokey_proto_rawDesc = []byte{ 0x2a, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0x09, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x77, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x16, 0x4c, - 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 
0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0xb4, 0x05, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x6f, 0x6b, + 0x74, 0x61, 0x22, 0xbd, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x22, 0x81, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, + 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb4, 0x05, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x12, 0xeb, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go index 27047a2df..0d612bba5 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go @@ -41,6 +41,65 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The states AutokeyConfig can be in. +type AutokeyConfig_State int32 + +const ( + // The state of the AutokeyConfig is unspecified. + AutokeyConfig_STATE_UNSPECIFIED AutokeyConfig_State = 0 + // The AutokeyConfig is currently active. 
+ AutokeyConfig_ACTIVE AutokeyConfig_State = 1 + // A previously configured key project has been deleted and the current + // AutokeyConfig is unusable. + AutokeyConfig_KEY_PROJECT_DELETED AutokeyConfig_State = 2 + // The AutokeyConfig is not yet initialized or has been reset to its default + // uninitialized state. + AutokeyConfig_UNINITIALIZED AutokeyConfig_State = 3 +) + +// Enum value maps for AutokeyConfig_State. +var ( + AutokeyConfig_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "KEY_PROJECT_DELETED", + 3: "UNINITIALIZED", + } + AutokeyConfig_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "KEY_PROJECT_DELETED": 2, + "UNINITIALIZED": 3, + } +) + +func (x AutokeyConfig_State) Enum() *AutokeyConfig_State { + p := new(AutokeyConfig_State) + *p = x + return p +} + +func (x AutokeyConfig_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AutokeyConfig_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0].Descriptor() +} + +func (AutokeyConfig_State) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0] +} + +func (x AutokeyConfig_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AutokeyConfig_State.Descriptor instead. +func (AutokeyConfig_State) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2, 0} +} + // Request message for // [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig]. type UpdateAutokeyConfigRequest struct { @@ -175,6 +234,8 @@ type AutokeyConfig struct { // `cloudkms.admin` role (or pertinent permissions). A request with an empty // key project field will clear the configuration. KeyProject string `protobuf:"bytes,2,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"` + // Output only. The state for the AutokeyConfig. + State AutokeyConfig_State `protobuf:"varint,4,opt,name=state,proto3,enum=google.cloud.kms.v1.AutokeyConfig_State" json:"state,omitempty"` } func (x *AutokeyConfig) Reset() { @@ -223,6 +284,13 @@ func (x *AutokeyConfig) GetKeyProject() string { return "" } +func (x *AutokeyConfig) GetState() AutokeyConfig_State { + if x != nil { + return x.State + } + return AutokeyConfig_STATE_UNSPECIFIED +} + // Request message for // [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig]. 
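The autokey_admin.pb.go hunk above introduces an output-only AutokeyConfig.State enum alongside the existing key_project field. A short sketch of how calling code might branch on the new state once it holds an AutokeyConfig; the fetch itself is omitted, the helper name is hypothetical, and only the generated accessors and enum constants shown in this diff are used.

package main

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

// describeAutokeyConfig reports whether an AutokeyConfig is usable, based on
// the State enum added in this vendor bump.
func describeAutokeyConfig(cfg *kmspb.AutokeyConfig) string {
	switch cfg.GetState() {
	case kmspb.AutokeyConfig_ACTIVE:
		return fmt.Sprintf("Autokey active, key project %q", cfg.GetKeyProject())
	case kmspb.AutokeyConfig_KEY_PROJECT_DELETED:
		return "key project was deleted; AutokeyConfig is unusable"
	case kmspb.AutokeyConfig_UNINITIALIZED:
		return "AutokeyConfig has not been initialized"
	default:
		return "state unspecified"
	}
}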
type ShowEffectiveAutokeyConfigRequest struct { @@ -359,81 +427,91 @@ var file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = []byte{ 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0xb9, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, + 0x6d, 0x65, 0x22, 0xd6, 0x02, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x3a, 0x69, 0xea, 0x41, 0x66, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, - 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x1e, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, - 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, - 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x32, - 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x70, - 0x0a, 0x21, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, - 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x22, 0x45, 0x0a, 0x22, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x32, 0xc8, 0x05, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x6f, - 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xd2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, - 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, - 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x66, 0xda, 0x41, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, - 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x0e, 0x61, 0x75, 0x74, 0x6f, - 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x31, 0x2f, 0x76, 0x31, 0x2f, - 0x7b, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, - 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x97, 0x01, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, + 0x65, 0x63, 0x74, 0x12, 0x43, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x56, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x4a, + 0x45, 0x43, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, + 0x0d, 0x55, 0x4e, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x03, + 0x3a, 0x69, 0xea, 0x41, 0x66, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0x0e, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x32, 0x0d, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x70, 0x0a, 0x21, 0x53, + 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, - 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xd2, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x6f, 0x77, - 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, - 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, + 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x45, 0x0a, + 0x22, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x32, 0xc8, 0x05, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xd2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x3a, 0x73, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, - 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x74, 0xca, 0x41, - 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, - 0x6d, 0x73, 0x42, 0x59, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x41, - 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x41, 0x64, 
0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, - 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x66, 0xda, 0x41, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x97, 0x01, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, + 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x7d, 0x12, 0xd2, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, + 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 
0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, + 0x59, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x41, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, + 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -448,29 +526,32 @@ func file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP() []byte { return file_google_cloud_kms_v1_autokey_admin_proto_rawDescData } +var file_google_cloud_kms_v1_autokey_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_cloud_kms_v1_autokey_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_google_cloud_kms_v1_autokey_admin_proto_goTypes = []any{ - (*UpdateAutokeyConfigRequest)(nil), // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest - (*GetAutokeyConfigRequest)(nil), // 1: google.cloud.kms.v1.GetAutokeyConfigRequest - (*AutokeyConfig)(nil), // 2: google.cloud.kms.v1.AutokeyConfig - (*ShowEffectiveAutokeyConfigRequest)(nil), // 3: google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest - (*ShowEffectiveAutokeyConfigResponse)(nil), // 4: google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse - (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask + (AutokeyConfig_State)(0), // 0: google.cloud.kms.v1.AutokeyConfig.State + (*UpdateAutokeyConfigRequest)(nil), // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest + (*GetAutokeyConfigRequest)(nil), // 2: google.cloud.kms.v1.GetAutokeyConfigRequest + (*AutokeyConfig)(nil), // 3: google.cloud.kms.v1.AutokeyConfig + (*ShowEffectiveAutokeyConfigRequest)(nil), // 4: google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest + (*ShowEffectiveAutokeyConfigResponse)(nil), // 5: google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask } var file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = []int32{ - 2, // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest.autokey_config:type_name -> google.cloud.kms.v1.AutokeyConfig - 5, // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest.update_mask:type_name -> google.protobuf.FieldMask - 0, // 2: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:input_type -> google.cloud.kms.v1.UpdateAutokeyConfigRequest - 1, // 3: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:input_type -> google.cloud.kms.v1.GetAutokeyConfigRequest - 3, // 4: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:input_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest - 2, // 5: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig - 2, // 6: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:output_type -> 
google.cloud.kms.v1.AutokeyConfig - 4, // 7: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:output_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse - 5, // [5:8] is the sub-list for method output_type - 2, // [2:5] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest.autokey_config:type_name -> google.cloud.kms.v1.AutokeyConfig + 6, // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 2: google.cloud.kms.v1.AutokeyConfig.state:type_name -> google.cloud.kms.v1.AutokeyConfig.State + 1, // 3: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:input_type -> google.cloud.kms.v1.UpdateAutokeyConfigRequest + 2, // 4: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:input_type -> google.cloud.kms.v1.GetAutokeyConfigRequest + 4, // 5: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:input_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest + 3, // 6: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig + 3, // 7: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig + 5, // 8: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:output_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_google_cloud_kms_v1_autokey_admin_proto_init() } @@ -545,13 +626,14 @@ func file_google_cloud_kms_v1_autokey_admin_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_cloud_kms_v1_autokey_admin_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 5, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_cloud_kms_v1_autokey_admin_proto_goTypes, DependencyIndexes: file_google_cloud_kms_v1_autokey_admin_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_autokey_admin_proto_enumTypes, MessageInfos: file_google_cloud_kms_v1_autokey_admin_proto_msgTypes, }.Build() File_google_cloud_kms_v1_autokey_admin_proto = out.File diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go index d1efef5f3..eaa41d032 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -737,7 +737,7 @@ type EkmConnection struct { // Output only. The time at which the // [EkmConnection][google.cloud.kms.v1.EkmConnection] was created. CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // A list of + // Optional. A list of // [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where // the EKM can be reached. There should be one ServiceResolver per EKM // replica. 
Currently, only a single @@ -1197,195 +1197,195 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, - 0xf2, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0xf7, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5f, 0x0a, 0x11, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x10, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, - 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, - 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x11, 0x4b, 0x65, - 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, - 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e, 0x55, 0x41, 0x4c, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, 0x4d, 0x53, 0x10, 0x02, 0x3a, - 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, - 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x01, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b, + 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, + 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 
0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, + 0x53, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41, + 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e, + 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, + 0x4d, 0x53, 0x10, 0x02, 0x3a, 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x47, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 
0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x63, 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2d, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x5e, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, - 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5e, 0x0a, 0x19, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc, 0x0b, 0x0a, 0x0a, 0x45, 0x6b, - 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, - 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc, + 0x0b, 0x0a, 0x0a, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, + 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x74, 0xda, 0x41, 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, + 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x44, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, - 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, - 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, - 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x22, 0x74, 0xda, 0x41, - 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0e, - 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x32, - 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, + 0x0c, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x7d, 0x12, 0xc3, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x22, 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 
0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x54, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, + 0x12, 0x45, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x45, - 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xc3, - 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, - 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, - 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, - 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x36, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x12, 0x45, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, - 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, - 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, 0xea, 0x41, 0x7c, 0x0a, 0x27, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 
0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, - 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, - 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, - 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, + 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a, + 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 
0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, + 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, + 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, + 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go index a1782b931..74d2c1f46 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -1073,7 +1073,7 @@ type CryptoKey struct { // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] // state before transitioning to // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. - // If not specified at creation time, the default duration is 24 hours. + // If not specified at creation time, the default duration is 30 days. DestroyScheduledDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=destroy_scheduled_duration,json=destroyScheduledDuration,proto3" json:"destroy_scheduled_duration,omitempty"` // Immutable. The resource name of the backend environment where the key // material for all [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go index e2bf64783..0b0f0e914 100644 --- a/vendor/cloud.google.com/go/kms/internal/version.go +++ b/vendor/cloud.google.com/go/kms/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.18.5" +const Version = "1.20.0" diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md index 630a1b9a6..d120456cd 100644 --- a/vendor/cloud.google.com/go/longrunning/CHANGES.md +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.0...longrunning/v0.6.1) (2024-09-12) + + +### Bug Fixes + +* **longrunning:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.12...longrunning/v0.6.0) (2024-08-20) + + +### Features + +* **longrunning:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + ## [0.5.12](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.11...longrunning/v0.5.12) (2024-08-08) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go new file mode 100644 index 000000000..eca6d4def --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go @@ -0,0 +1,32 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package longrunning + +import ( + "iter" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationIterator) All() iter.Seq2[*longrunningpb.Operation, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index 74ea89101..3be65a155 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -821,11 +821,11 @@ func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunni params.Add("name", fmt.Sprintf("%v", req.GetName())) } if req.GetTimeout() != nil { - timeout, err := protojson.Marshal(req.GetTimeout()) + field, err := protojson.Marshal(req.GetTimeout()) if err != nil { return nil, err } - params.Add("timeout", string(timeout[1:len(timeout)-1])) + params.Add("timeout", string(field[1:len(field)-1])) } baseUrl.RawQuery = params.Encode() diff --git a/vendor/cloud.google.com/go/monitoring/LICENSE b/vendor/cloud.google.com/go/monitoring/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go new file mode 100644 index 000000000..ae1dd6b9a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -0,0 +1,399 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newAlertPolicyClientHook clientHook + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. +type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + return &AlertPolicyCallOptions{ + ListAlertPolicies: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalAlertPolicyClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator + GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error + UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) +} + +// AlertPolicyClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +type AlertPolicyClient struct { + // The internal transport-dependent client. + internalClient internalAlertPolicyClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListAlertPolicies lists the existing alerting policies for the workspace. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + return c.internalClient.ListAlertPolicies(ctx, req, opts...) +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.GetAlertPolicy(ctx, req, opts...) +} + +// CreateAlertPolicy creates a new alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. 
+func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.CreateAlertPolicy(ctx, req, opts...) +} + +// DeleteAlertPolicy deletes an alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteAlertPolicy(ctx, req, opts...) +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.UpdateAlertPolicy(ctx, req, opts...) +} + +// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type alertPolicyGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing AlertPolicyClient + CallOptions **AlertPolicyCallOptions + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewAlertPolicyClient creates a new alert policy service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + clientOpts := defaultAlertPolicyGRPCClientOptions() + if newAlertPolicyClientHook != nil { + hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()} + + c := &alertPolicyGRPCClient{ + connPool: connPool, + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *alertPolicyGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + resp := &monitoringpb.ListAlertPoliciesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
+ opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go new file mode 100644 index 000000000..4de74e773 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go @@ -0,0 +1,682 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
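Every iterator generated in this file follows that same Next contract. As a consumer-side sketch (the project ID is a placeholder, and any of the iterators above could stand in for the alert-policy one):

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// ListAlertPolicies returns an *AlertPolicyIterator; drain it with Next
	// until iterator.Done signals the end of the result set.
	it := c.ListAlertPolicies(ctx, &monitoringpb.ListAlertPoliciesRequest{
		Name: "projects/my-project", // placeholder project
	})
	for {
		policy, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(policy.GetDisplayName())
	}
}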
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceIterator manages a stream of *monitoringpb.Service. +type ServiceIterator struct { + items []*monitoringpb.Service + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *ServiceIterator) Next() (*monitoringpb.Service, error) { + var item *monitoringpb.Service + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective. +type ServiceLevelObjectiveIterator struct { + items []*monitoringpb.ServiceLevelObjective + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) { + var item *monitoringpb.ServiceLevelObjective + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceLevelObjectiveIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SnoozeIterator manages a stream of *monitoringpb.Snooze. +type SnoozeIterator struct { + items []*monitoringpb.Snooze + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnoozeIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
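Because each of these iterators also exposes PageInfo, results can be consumed one page at a time through the Pager helper in google.golang.org/api/iterator rather than item by item. A sketch, with the page size and project chosen arbitrarily:

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewGroupClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListGroups(ctx, &monitoringpb.ListGroupsRequest{
		Name: "projects/my-project", // placeholder project
	})

	// NewPager wraps anything with a PageInfo method; an empty token starts
	// at the first page, and NextPage fills the slice with one page of items.
	pager := iterator.NewPager(it, 50, "")
	for {
		var page []*monitoringpb.Group
		next, err := pager.NextPage(&page)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("fetched a page of %d groups\n", len(page))
		if next == "" {
			break
		}
	}
}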
+func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) { + var item *monitoringpb.Snooze + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnoozeIterator) bufLen() int { + return len(it.items) +} + +func (it *SnoozeIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData. +type TimeSeriesDataIterator struct { + items []*monitoringpb.TimeSeriesData + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) { + var item *monitoringpb.TimeSeriesData + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesDataIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesDataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go new file mode 100644 index 000000000..2982e1f84 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go @@ -0,0 +1,112 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package monitoring + +import ( + "iter" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/googleapis/gax-go/v2/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
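On Go 1.23 and newer, the All methods below allow range-over-func consumption instead of an explicit Next loop. A sketch, with the parent project as a placeholder:

//go:build go1.23

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewUptimeCheckClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListUptimeCheckConfigs(ctx, &monitoringpb.ListUptimeCheckConfigsRequest{
		Parent: "projects/my-project", // placeholder project
	})

	// All yields (value, error) pairs and stops after the first error,
	// so the error still has to be checked inside the loop body.
	for cfg, err := range it.All() {
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfg.GetName())
	}
}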
+func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go new file mode 100644 index 000000000..e8c403647 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go @@ -0,0 +1,124 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Cloud Monitoring API. +// +// Manages your Cloud Monitoring data and configurations. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. 
+// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &monitoringpb.CreateAlertPolicyRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest. +// } +// resp, err := c.CreateAlertPolicy(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// +// # Use of Context +// +// The ctx passed to NewAlertPolicyClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
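A caller that wants to run with a narrower credential than those defaults can pass its own scopes when the client is constructed; whether the override takes effect depends on the credential flow in use, so the snippet below is a sketch rather than a guarantee:

package main

import (
	"context"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Ask for read-only monitoring access instead of the broader defaults
	// returned by monitoring.DefaultAuthScopes().
	c, err := monitoring.NewAlertPolicyClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/monitoring.read"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}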
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json new file mode 100644 index 000000000..a33cb6fcf --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json @@ -0,0 +1,336 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.monitoring.v3", + "libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2", + "services": { + "AlertPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AlertPolicyClient", + "rpcs": { + "CreateAlertPolicy": { + "methods": [ + "CreateAlertPolicy" + ] + }, + "DeleteAlertPolicy": { + "methods": [ + "DeleteAlertPolicy" + ] + }, + "GetAlertPolicy": { + "methods": [ + "GetAlertPolicy" + ] + }, + "ListAlertPolicies": { + "methods": [ + "ListAlertPolicies" + ] + }, + "UpdateAlertPolicy": { + "methods": [ + "UpdateAlertPolicy" + ] + } + } + } + } + }, + "GroupService": { + "clients": { + "grpc": { + "libraryClient": "GroupClient", + "rpcs": { + "CreateGroup": { + "methods": [ + "CreateGroup" + ] + }, + "DeleteGroup": { + "methods": [ + "DeleteGroup" + ] + }, + "GetGroup": { + "methods": [ + "GetGroup" + ] + }, + "ListGroupMembers": { + "methods": [ + "ListGroupMembers" + ] + }, + "ListGroups": { + "methods": [ + "ListGroups" + ] + }, + "UpdateGroup": { + "methods": [ + "UpdateGroup" + ] + } + } + } + } + }, + "MetricService": { + "clients": { + "grpc": { + "libraryClient": "MetricClient", + "rpcs": { + "CreateMetricDescriptor": { + "methods": [ + "CreateMetricDescriptor" + ] + }, + "CreateServiceTimeSeries": { + "methods": [ + "CreateServiceTimeSeries" + ] + }, + "CreateTimeSeries": { + "methods": [ + "CreateTimeSeries" + ] + }, + "DeleteMetricDescriptor": { + "methods": [ + "DeleteMetricDescriptor" + ] + }, + "GetMetricDescriptor": { + "methods": [ + "GetMetricDescriptor" + ] + }, + "GetMonitoredResourceDescriptor": { + "methods": [ + "GetMonitoredResourceDescriptor" + ] + }, + "ListMetricDescriptors": { + "methods": [ + "ListMetricDescriptors" + ] + }, + "ListMonitoredResourceDescriptors": { + "methods": [ + "ListMonitoredResourceDescriptors" + ] + }, + "ListTimeSeries": { + "methods": [ + "ListTimeSeries" + ] + } + } + } + } + }, + "NotificationChannelService": { + "clients": { + "grpc": { + "libraryClient": "NotificationChannelClient", + "rpcs": { + "CreateNotificationChannel": { + "methods": [ + "CreateNotificationChannel" + ] + }, + "DeleteNotificationChannel": { + "methods": [ + "DeleteNotificationChannel" + ] + }, + "GetNotificationChannel": { + "methods": [ + "GetNotificationChannel" + ] + }, + "GetNotificationChannelDescriptor": { + "methods": [ + "GetNotificationChannelDescriptor" + ] + }, + "GetNotificationChannelVerificationCode": { + "methods": [ + "GetNotificationChannelVerificationCode" + ] + }, + "ListNotificationChannelDescriptors": { + "methods": [ + "ListNotificationChannelDescriptors" + ] + }, + "ListNotificationChannels": { + "methods": [ + "ListNotificationChannels" + ] + }, + "SendNotificationChannelVerificationCode": { + "methods": [ + "SendNotificationChannelVerificationCode" + ] + }, + "UpdateNotificationChannel": { + "methods": [ + 
"UpdateNotificationChannel" + ] + }, + "VerifyNotificationChannel": { + "methods": [ + "VerifyNotificationChannel" + ] + } + } + } + } + }, + "QueryService": { + "clients": { + "grpc": { + "libraryClient": "QueryClient", + "rpcs": { + "QueryTimeSeries": { + "methods": [ + "QueryTimeSeries" + ] + } + } + } + } + }, + "ServiceMonitoringService": { + "clients": { + "grpc": { + "libraryClient": "ServiceMonitoringClient", + "rpcs": { + "CreateService": { + "methods": [ + "CreateService" + ] + }, + "CreateServiceLevelObjective": { + "methods": [ + "CreateServiceLevelObjective" + ] + }, + "DeleteService": { + "methods": [ + "DeleteService" + ] + }, + "DeleteServiceLevelObjective": { + "methods": [ + "DeleteServiceLevelObjective" + ] + }, + "GetService": { + "methods": [ + "GetService" + ] + }, + "GetServiceLevelObjective": { + "methods": [ + "GetServiceLevelObjective" + ] + }, + "ListServiceLevelObjectives": { + "methods": [ + "ListServiceLevelObjectives" + ] + }, + "ListServices": { + "methods": [ + "ListServices" + ] + }, + "UpdateService": { + "methods": [ + "UpdateService" + ] + }, + "UpdateServiceLevelObjective": { + "methods": [ + "UpdateServiceLevelObjective" + ] + } + } + } + } + }, + "SnoozeService": { + "clients": { + "grpc": { + "libraryClient": "SnoozeClient", + "rpcs": { + "CreateSnooze": { + "methods": [ + "CreateSnooze" + ] + }, + "GetSnooze": { + "methods": [ + "GetSnooze" + ] + }, + "ListSnoozes": { + "methods": [ + "ListSnoozes" + ] + }, + "UpdateSnooze": { + "methods": [ + "UpdateSnooze" + ] + } + } + } + } + }, + "UptimeCheckService": { + "clients": { + "grpc": { + "libraryClient": "UptimeCheckClient", + "rpcs": { + "CreateUptimeCheckConfig": { + "methods": [ + "CreateUptimeCheckConfig" + ] + }, + "DeleteUptimeCheckConfig": { + "methods": [ + "DeleteUptimeCheckConfig" + ] + }, + "GetUptimeCheckConfig": { + "methods": [ + "GetUptimeCheckConfig" + ] + }, + "ListUptimeCheckConfigs": { + "methods": [ + "ListUptimeCheckConfigs" + ] + }, + "ListUptimeCheckIps": { + "methods": [ + "ListUptimeCheckIps" + ] + }, + "UpdateUptimeCheckConfig": { + "methods": [ + "UpdateUptimeCheckConfig" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go new file mode 100644 index 000000000..da216081d --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -0,0 +1,466 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newGroupClientHook clientHook + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + return &GroupCallOptions{ + ListGroups: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateGroup: []gax.CallOption{ + gax.WithTimeout(180000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListGroupMembers: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API. 
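Those generated defaults are a starting point rather than a limit: every exported method accepts trailing gax.CallOption values, and because they are appended after the defaults they take precedence for that one call. A sketch that tightens the timeout of a single GetGroup (the resource name is a placeholder):

package main

import (
	"context"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewGroupClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// The trailing option is resolved after the generated 30s default,
	// so this call runs with a 10s timeout without touching c.CallOptions.
	g, err := c.GetGroup(ctx, &monitoringpb.GetGroupRequest{
		Name: "projects/my-project/groups/my-group", // placeholder resource name
	}, gax.WithTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(g.GetDisplayName())
}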
+type internalGroupClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator + GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error + ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator +} + +// GroupClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +type GroupClient struct { + // The internal transport-dependent client. + internalClient internalGroupClient + + // The call options for this service. + CallOptions *GroupCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + return c.internalClient.ListGroups(ctx, req, opts...) +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.GetGroup(ctx, req, opts...) +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.CreateGroup(ctx, req, opts...) +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. 
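Since the group name itself is immutable, changing anything else is a read-modify-write against the same resource. A sketch with placeholder names:

package main

import (
	"context"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewGroupClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Fetch the existing group, change a mutable attribute, and write it back.
	g, err := c.GetGroup(ctx, &monitoringpb.GetGroupRequest{
		Name: "projects/my-project/groups/my-group", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	g.DisplayName = "renamed group"

	if _, err := c.UpdateGroup(ctx, &monitoringpb.UpdateGroupRequest{Group: g}); err != nil {
		log.Fatal(err)
	}
}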
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.UpdateGroup(ctx, req, opts...) +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteGroup(ctx, req, opts...) +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + return c.internalClient.ListGroupMembers(ctx, req, opts...) +} + +// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type groupGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing GroupClient + CallOptions **GroupCallOptions + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewGroupClient creates a new group service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + clientOpts := defaultGroupGRPCClientOptions() + if newGroupClientHook != nil { + hookOpts, err := newGroupClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := GroupClient{CallOptions: defaultGroupCallOptions()} + + c := &groupGRPCClient{ + connPool: connPool, + groupClient: monitoringpb.NewGroupServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *groupGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *groupGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...) + it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + resp := &monitoringpb.ListGroupsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetGroup(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + resp := &monitoringpb.ListGroupMembersResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMembers(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go new file mode 100644 index 000000000..d43d261d1 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -0,0 +1,578 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newMetricClientHook clientHook + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
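Because this struct is exported on MetricClient, the per-method defaults can also be adjusted once for the whole client instead of on every call, provided the fields are not modified concurrently with method calls. A sketch that hypothetically stretches the ListTimeSeries timeout:

package main

import (
	"context"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	c, err := monitoring.NewMetricClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Options appended here are resolved after the generated defaults for
	// every subsequent ListTimeSeries call made through this client.
	c.CallOptions.ListTimeSeries = append(c.CallOptions.ListTimeSeries,
		gax.WithTimeout(2*time.Minute))
}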
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption + CreateServiceTimeSeries []gax.CallOption +} + +func defaultMetricGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMonitoredResourceDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListMetricDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + DeleteMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTimeSeries: []gax.CallOption{ + gax.WithTimeout(90000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateTimeSeries: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + CreateServiceTimeSeries: []gax.CallOption{}, + } +} + +// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalMetricClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator + GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) + ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator + GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error + ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator + CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error + CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error +} + +// MetricClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +type MetricClient struct { + // The internal transport-dependent client. + internalClient internalMetricClient + + // The call options for this service. + CallOptions *MetricCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...) +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...) +} + +// ListMetricDescriptors lists metric descriptors that match a filter. 
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + return c.internalClient.ListMetricDescriptors(ctx, req, opts...) +} + +// GetMetricDescriptor gets a single metric descriptor. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.GetMetricDescriptor(ctx, req, opts...) +} + +// CreateMetricDescriptor creates a new metric descriptor. +// The creation is executed asynchronously. +// User-created metric descriptors define +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics). +// The metric descriptor is updated if it already exists, +// except that metric labels are never removed. +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.CreateMetricDescriptor(ctx, req, opts...) +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be +// deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...) +} + +// ListTimeSeries lists time series that match a filter. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + return c.internalClient.ListTimeSeries(ctx, req, opts...) +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +// This method does not support +// resource locations constraint of an organization +// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateTimeSeries(ctx, req, opts...) +} + +// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time +// series is a time series for a metric from a Google Cloud service. The +// response is empty if all time series in the request were written. If any +// time series could not be written, a corresponding failure message is +// included in the error response. This endpoint rejects writes to +// user-defined metrics. +// This method is only for use by Google Cloud services. Use +// projects.timeSeries.create +// instead. +func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...) +} + +// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type metricGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing MetricClient + CallOptions **MetricCallOptions + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewMetricClient creates a new metric service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + clientOpts := defaultMetricGRPCClientOptions() + if newMetricClientHook != nil { + hookOpts, err := newMetricClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := MetricClient{CallOptions: defaultMetricCallOptions()} + + c := &metricGRPCClient{ + connPool: connPool, + metricClient: monitoringpb.NewMetricServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *metricGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *metricGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...) 
+ it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + resp := &monitoringpb.ListMetricDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...) 
+ it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + resp := &monitoringpb.ListTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeries(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateServiceTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go new file mode 100644 index 000000000..e7b3595fa --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -0,0 +1,2432 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/alert.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all the conditions are met + // simultaneously. This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +// Enum value maps for AlertPolicy_ConditionCombinerType. +var ( + AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", + } + AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, + } +) + +func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType { + p := new(AlertPolicy_ConditionCombinerType) + *p = x + return p +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor() +} + +func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[0] +} + +func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead. +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +// An enumeration of possible severity level for an Alert Policy. +type AlertPolicy_Severity int32 + +const ( + // No severity is specified. This is the default value. + AlertPolicy_SEVERITY_UNSPECIFIED AlertPolicy_Severity = 0 + // This is the highest severity level. Use this if the problem could + // cause significant damage or downtime. 
+ AlertPolicy_CRITICAL AlertPolicy_Severity = 1 + // This is the medium severity level. Use this if the problem could + // cause minor damage or downtime. + AlertPolicy_ERROR AlertPolicy_Severity = 2 + // This is the lowest severity level. Use this if the problem is not causing + // any damage or downtime, but could potentially lead to a problem in the + // future. + AlertPolicy_WARNING AlertPolicy_Severity = 3 +) + +// Enum value maps for AlertPolicy_Severity. +var ( + AlertPolicy_Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "CRITICAL", + 2: "ERROR", + 3: "WARNING", + } + AlertPolicy_Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "CRITICAL": 1, + "ERROR": 2, + "WARNING": 3, + } +) + +func (x AlertPolicy_Severity) Enum() *AlertPolicy_Severity { + p := new(AlertPolicy_Severity) + *p = x + return p +} + +func (x AlertPolicy_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[1].Descriptor() +} + +func (AlertPolicy_Severity) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[1] +} + +func (x AlertPolicy_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Severity.Descriptor instead. +func (AlertPolicy_Severity) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +// A condition control that determines how metric-threshold conditions +// are evaluated when data stops arriving. +// This control doesn't affect metric-absence policies. +type AlertPolicy_Condition_EvaluationMissingData int32 + +const ( + // An unspecified evaluation missing data option. Equivalent to + // EVALUATION_MISSING_DATA_NO_OP. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED AlertPolicy_Condition_EvaluationMissingData = 0 + // If there is no data to evaluate the condition, then evaluate the + // condition as false. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_INACTIVE AlertPolicy_Condition_EvaluationMissingData = 1 + // If there is no data to evaluate the condition, then evaluate the + // condition as true. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_ACTIVE AlertPolicy_Condition_EvaluationMissingData = 2 + // Do not evaluate the condition to any value if there is no data. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_NO_OP AlertPolicy_Condition_EvaluationMissingData = 3 +) + +// Enum value maps for AlertPolicy_Condition_EvaluationMissingData. 
+var ( + AlertPolicy_Condition_EvaluationMissingData_name = map[int32]string{ + 0: "EVALUATION_MISSING_DATA_UNSPECIFIED", + 1: "EVALUATION_MISSING_DATA_INACTIVE", + 2: "EVALUATION_MISSING_DATA_ACTIVE", + 3: "EVALUATION_MISSING_DATA_NO_OP", + } + AlertPolicy_Condition_EvaluationMissingData_value = map[string]int32{ + "EVALUATION_MISSING_DATA_UNSPECIFIED": 0, + "EVALUATION_MISSING_DATA_INACTIVE": 1, + "EVALUATION_MISSING_DATA_ACTIVE": 2, + "EVALUATION_MISSING_DATA_NO_OP": 3, + } +) + +func (x AlertPolicy_Condition_EvaluationMissingData) Enum() *AlertPolicy_Condition_EvaluationMissingData { + p := new(AlertPolicy_Condition_EvaluationMissingData) + *p = x + return p +} + +func (x AlertPolicy_Condition_EvaluationMissingData) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Condition_EvaluationMissingData) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[2].Descriptor() +} + +func (AlertPolicy_Condition_EvaluationMissingData) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[2] +} + +func (x AlertPolicy_Condition_EvaluationMissingData) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Condition_EvaluationMissingData.Descriptor instead. +func (AlertPolicy_Condition_EvaluationMissingData) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/). +type AlertPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the policy exists. The resource name for this policy. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + // + // The convention for the display_name of a PrometheusQueryLanguageCondition + // is "{rule group name}/{alert name}", where the {rule group name} and + // {alert name} should be taken from the corresponding Prometheus + // configuration file. This convention is not enforced. + // In any case the display_name is not a unique key of the AlertPolicy. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. 
Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + // + // Note that Prometheus {alert name} is a + // [valid Prometheus label + // names](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels), + // whereas Prometheus {rule group} is an unrestricted UTF-8 string. + // This means that they cannot be stored as-is in user labels, because + // they may contain characters that are not allowed in user-label values. + UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. + // If `condition_time_series_query_language` is present, it must be the only + // `condition`. + // If `condition_monitoring_query_language` is present, it must be the only + // `condition`. + Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` + // How to combine the results of multiple conditions to determine if an + // incident should be opened. + // If `condition_time_series_query_language` is present, this must be + // `COMBINE_UNSPECIFIED`. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Read-only description of how the alert policy is invalid. This field is + // only set when the alert policy is invalid. An invalid alert policy will not + // generate incidents. + Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. 
The format of the entries in this field is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. + MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` + // Control over how this alert policy's notification channels are notified. + AlertStrategy *AlertPolicy_AlertStrategy `protobuf:"bytes,21,opt,name=alert_strategy,json=alertStrategy,proto3" json:"alert_strategy,omitempty"` + // Optional. The severity of an alert policy indicates how important incidents + // generated by that policy are. The severity level will be displayed on the + // Incident detail page and in notifications. + Severity AlertPolicy_Severity `protobuf:"varint,22,opt,name=severity,proto3,enum=google.monitoring.v3.AlertPolicy_Severity" json:"severity,omitempty"` +} + +func (x *AlertPolicy) Reset() { + *x = AlertPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy) ProtoMessage() {} + +func (x *AlertPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0} +} + +func (x *AlertPolicy) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlertPolicy) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { + if x != nil { + return x.Documentation + } + return nil +} + +func (x *AlertPolicy) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition { + if x != nil { + return x.Conditions + } + return nil +} + +func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { + if x != nil { + return x.Combiner + } + return AlertPolicy_COMBINE_UNSPECIFIED +} + +func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *AlertPolicy) GetValidity() *status.Status { + if x != nil { + return x.Validity + } + return nil +} + +func (x *AlertPolicy) GetNotificationChannels() []string { + if x != nil { + return x.NotificationChannels + } + return nil +} + +func (x *AlertPolicy) GetCreationRecord() *MutationRecord { + if x != nil { + return x.CreationRecord + } + return nil +} + +func (x *AlertPolicy) GetMutationRecord() *MutationRecord { + if x != nil { + return x.MutationRecord + } + return nil +} + +func (x *AlertPolicy) GetAlertStrategy() *AlertPolicy_AlertStrategy { + if x != nil { + return x.AlertStrategy + } + return nil +} + +func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity { + if x != nil { + return x.Severity + } + return AlertPolicy_SEVERITY_UNSPECIFIED +} + +// Documentation that is included in the notifications and incidents +// pertaining to this policy. +type AlertPolicy_Documentation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The body of the documentation, interpreted according to `mime_type`. + // The content may not exceed 8,192 Unicode characters and may not exceed + // more than 10,240 bytes when encoded in UTF-8 format, whichever is + // smaller. This text can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The format of the `content` field. Presently, only the value + // `"text/markdown"` is supported. See + // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. + MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` + // Optional. The subject line of the notification. The subject line may not + // exceed 10,240 bytes. In notifications generated by this policy, the + // contents of the subject line after variable expansion will be truncated + // to 255 bytes or shorter at the latest UTF-8 character boundary. The + // 255-byte limit is recommended by [this + // thread](https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit). + // It is both the limit imposed by some third-party ticketing products and + // it is common to define textual fields in databases as VARCHAR(255). + // + // The contents of the subject line can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). 
+ // If this field is missing or empty, a default subject line will be + // generated. + Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Optional. Links to content such as playbooks, repositories, and other + // resources. This field can contain up to 3 entries. + Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *AlertPolicy_Documentation) Reset() { + *x = AlertPolicy_Documentation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation) ProtoMessage() {} + +func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *AlertPolicy_Documentation) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *AlertPolicy_Documentation) GetMimeType() string { + if x != nil { + return x.MimeType + } + return "" +} + +func (x *AlertPolicy_Documentation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link { + if x != nil { + return x.Links + } + return nil +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the condition exists. The unique resource name for this + // condition. Its format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Cloud Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Cloud Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. 
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. + // + // Types that are assignable to Condition: + // + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + // *AlertPolicy_Condition_ConditionMatchedLog + // *AlertPolicy_Condition_ConditionMonitoringQueryLanguage + // *AlertPolicy_Condition_ConditionPrometheusQueryLanguage + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` +} + +func (x *AlertPolicy_Condition) Reset() { + *x = AlertPolicy_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition) ProtoMessage() {} + +func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *AlertPolicy_Condition) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlertPolicy_Condition) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMatchedLog() *AlertPolicy_Condition_LogMatch { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMatchedLog); ok { + return x.ConditionMatchedLog + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMonitoringQueryLanguage() *AlertPolicy_Condition_MonitoringQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMonitoringQueryLanguage); ok { + return x.ConditionMonitoringQueryLanguage + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionPrometheusQueryLanguage() *AlertPolicy_Condition_PrometheusQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionPrometheusQueryLanguage); ok { + return x.ConditionPrometheusQueryLanguage + } + return nil +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type 
AlertPolicy_Condition_ConditionThreshold struct { + // A condition that compares a time series against a threshold. + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionAbsent struct { + // A condition that checks that a time series continues to + // receive new data points. + ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionMatchedLog struct { + // A condition that checks for log messages matching given constraints. If + // set, no other conditions can be present. + ConditionMatchedLog *AlertPolicy_Condition_LogMatch `protobuf:"bytes,20,opt,name=condition_matched_log,json=conditionMatchedLog,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionMonitoringQueryLanguage struct { + // A condition that uses the Monitoring Query Language to define + // alerts. + ConditionMonitoringQueryLanguage *AlertPolicy_Condition_MonitoringQueryLanguageCondition `protobuf:"bytes,19,opt,name=condition_monitoring_query_language,json=conditionMonitoringQueryLanguage,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionPrometheusQueryLanguage struct { + // A condition that uses the Prometheus query language to define alerts. + ConditionPrometheusQueryLanguage *AlertPolicy_Condition_PrometheusQueryLanguageCondition `protobuf:"bytes,21,opt,name=condition_prometheus_query_language,json=conditionPrometheusQueryLanguage,proto3,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionMatchedLog) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage) isAlertPolicy_Condition_Condition() {} + +// Control over how the notification channels in `notification_channels` +// are notified when this alert fires. +type AlertPolicy_AlertStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required for alert policies with a `LogMatch` condition. + // + // This limit is not implemented for alert policies that are not log-based. + NotificationRateLimit *AlertPolicy_AlertStrategy_NotificationRateLimit `protobuf:"bytes,1,opt,name=notification_rate_limit,json=notificationRateLimit,proto3" json:"notification_rate_limit,omitempty"` + // If an alert policy that was active has no data for this long, any open + // incidents will close + AutoClose *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"` + // Control how notifications will be sent out, on a per-channel basis. 
+ NotificationChannelStrategy []*AlertPolicy_AlertStrategy_NotificationChannelStrategy `protobuf:"bytes,4,rep,name=notification_channel_strategy,json=notificationChannelStrategy,proto3" json:"notification_channel_strategy,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy) Reset() { + *x = AlertPolicy_AlertStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy.ProtoReflect.Descriptor instead. +func (*AlertPolicy_AlertStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationRateLimit() *AlertPolicy_AlertStrategy_NotificationRateLimit { + if x != nil { + return x.NotificationRateLimit + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetAutoClose() *durationpb.Duration { + if x != nil { + return x.AutoClose + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPolicy_AlertStrategy_NotificationChannelStrategy { + if x != nil { + return x.NotificationChannelStrategy + } + return nil +} + +// Links to content such as playbooks, repositories, and other resources. +type AlertPolicy_Documentation_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short display name for the link. The display name must not be empty + // or exceed 63 characters. Example: "playbook". + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The url of a webpage. + // A url can be templatized by using variables + // in the path or the query parameters. The total length of a URL should + // not exceed 2083 characters before and after variable expansion. + // Example: "https://my_domain.com/playbook?name=${resource.name}" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *AlertPolicy_Documentation_Link) Reset() { + *x = AlertPolicy_Documentation_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation_Link) ProtoMessage() {} + +func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *AlertPolicy_Documentation_Link) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy_Documentation_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A type of trigger. + // + // Types that are assignable to Type: + // + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` +} + +func (x *AlertPolicy_Condition_Trigger) Reset() { + *x = AlertPolicy_Condition_Trigger{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_Trigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} + +func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + // The absolute number of time series that must fail + // the predicate for the condition to be triggered. + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + // The percentage of time series that must fail the + // predicate for the condition to be triggered. + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +// A condition type that compares a collection of time series +// against a threshold. +type AlertPolicy_Condition_MetricThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies a time series that should be used as the denominator of a + // ratio that will be compared with the threshold. If a + // `denominator_filter` is specified, the time series specified by the + // `filter` field will be used as the numerator. + // + // The filter must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // When this field is present, the `MetricThreshold` condition forecasts + // whether the time series is predicted to violate the threshold within + // the `forecast_horizon`. When this field is not set, the + // `MetricThreshold` tests the current value of the timeseries against the + // threshold. + ForecastOptions *AlertPolicy_Condition_MetricThreshold_ForecastOptions `protobuf:"bytes,12,opt,name=forecast_options,json=forecastOptions,proto3" json:"forecast_options,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). 
+ // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. To use this control, the value + // of the `duration` field must be greater than or equal to 60 seconds. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold) Reset() { + *x = AlertPolicy_Condition_MetricThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if x != nil { + return x.DenominatorFilter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if x != nil { + return x.DenominatorAggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetForecastOptions() *AlertPolicy_Condition_MetricThreshold_ForecastOptions { + if x != nil { + return x.ForecastOptions + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if x != nil { + return x.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if x != nil { + return x.ThresholdValue + } + return 0 +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. +type AlertPolicy_Condition_MetricAbsence struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). 
+ // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // The amount of time that a time series must fail to report new + // data to be considered failing. The minimum value of this field + // is 120 seconds. Larger values that are a multiple of a + // minute--for example, 240 or 300 seconds--are supported. + // If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricAbsence) Reset() { + *x = AlertPolicy_Condition_MetricAbsence{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricAbsence) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +// A condition type that checks whether a log message in the [scoping +// project](https://cloud.google.com/monitoring/api/v3#project_name) +// satisfies the given filter. Logs from other projects in the metrics +// scope are not evaluated. +type AlertPolicy_Condition_LogMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A logs-based filter. See [Advanced Logs + // Queries](https://cloud.google.com/logging/docs/view/advanced-queries) + // for how this filter should be constructed. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. A map from a label key to an extractor expression, which is + // used to extract the value for this label key. 
Each entry in this map is + // a specification for how data should be extracted from log entries that + // match `filter`. Each combination of extracted values is treated as a + // separate rule for the purposes of triggering notifications. Label keys + // and corresponding values can be used in notifications generated by this + // condition. + // + // Please see [the documentation on logs-based metric + // `valueExtractor`s](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor) + // for syntax and examples. + LabelExtractors map[string]string `protobuf:"bytes,2,rep,name=label_extractors,json=labelExtractors,proto3" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AlertPolicy_Condition_LogMatch) Reset() { + *x = AlertPolicy_Condition_LogMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_LogMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {} + +func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_LogMatch.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_LogMatch) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 3} +} + +func (x *AlertPolicy_Condition_LogMatch) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_LogMatch) GetLabelExtractors() map[string]string { + if x != nil { + return x.LabelExtractors + } + return nil +} + +// A condition type that allows alert policies to be defined using +// [Monitoring Query Language](https://cloud.google.com/monitoring/mql). +type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [Monitoring Query Language](https://cloud.google.com/monitoring/mql) + // query that outputs a boolean stream. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. 
If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,4,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MonitoringQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 4} +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that allows alert policies to be defined using +// [Prometheus Query Language +// (PromQL)](https://prometheus.io/docs/prometheus/latest/querying/basics/). +// +// The PrometheusQueryLanguageCondition message contains information +// from a Prometheus alerting rule and its associated rule group. +// +// A Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/). +// The semantics of a Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule). +// +// A Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). 
+// The semantics of a Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group). +// +// Because Cloud Alerting has no representation of a Prometheus rule +// group resource, we must embed the information of the parent rule +// group inside each of the conditions that refer to it. We must also +// update the contents of all Prometheus alerts in case the information +// of their rule group changes. +// +// The PrometheusQueryLanguageCondition protocol buffer combines the +// information of the corresponding rule group and alerting rule. +// The structure of the PrometheusQueryLanguageCondition protocol buffer +// does NOT mimic the structure of the Prometheus rule group and alerting +// rule YAML declarations. The PrometheusQueryLanguageCondition protocol +// buffer may change in the future to support future rule group and/or +// alerting rule features. There are no new such features at the present +// time (2023-06-26). +type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The PromQL expression to evaluate. Every evaluation cycle + // this expression is evaluated at the current time, and all resultant + // time series become pending/firing alerts. This field must not be empty. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // Optional. Alerts are considered firing once their PromQL expression was + // evaluated to be "true" for this long. + // Alerts whose PromQL expression was not evaluated to be "true" for + // long enough are considered pending. + // Must be a non-negative duration or missing. + // This field is optional. Its default value is zero. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // Optional. How often this rule should be evaluated. + // Must be a positive multiple of 30 seconds or missing. + // This field is optional. Its default value is 30 seconds. + // If this PrometheusQueryLanguageCondition was generated from a + // Prometheus alerting rule, then this value should be taken from the + // enclosing rule group. + EvaluationInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=evaluation_interval,json=evaluationInterval,proto3" json:"evaluation_interval,omitempty"` + // Optional. Labels to add to or overwrite in the PromQL query result. + // Label names [must be + // valid](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // Label values can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). + // The only available variable names are the names of the labels in the + // PromQL result, including "__name__" and "value". "labels" may be empty. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional. The rule group name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. 
If this field is not empty, then it must + // contain a valid UTF-8 string. + // This field may not exceed 2048 Unicode characters in length. + RuleGroup string `protobuf:"bytes,5,opt,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` + // Optional. The alerting rule name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. If this field is not empty, then it must be a + // [valid Prometheus label + // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // This field may not exceed 2048 Unicode characters in length. + AlertRule string `protobuf:"bytes,6,opt,name=alert_rule,json=alertRule,proto3" json:"alert_rule,omitempty"` +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_PrometheusQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 5} +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetEvaluationInterval() *durationpb.Duration { + if x != nil { + return x.EvaluationInterval + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetRuleGroup() string { + if x != nil { + return x.RuleGroup + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetAlertRule() string { + if x != nil { + return x.AlertRule + } + return "" +} + +// Options used when forecasting the time series and testing +// the predicted value against the threshold. +type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The length of time into the future to forecast whether a + // time series will violate the threshold. If the predicted value is + // found to violate the threshold, and the violation is observed in all + // forecasts made for the configured `duration`, then the time series is + // considered to be failing. + // The forecast horizon can range from 1 hour to 60 hours. + ForecastHorizon *durationpb.Duration `protobuf:"bytes,1,opt,name=forecast_horizon,json=forecastHorizon,proto3" json:"forecast_horizon,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() { + *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold_ForecastOptions.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1, 0} +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) GetForecastHorizon() *durationpb.Duration { + if x != nil { + return x.ForecastHorizon + } + return nil +} + +// Control over the rate of notifications sent to this alert policy's +// notification channels. +type AlertPolicy_AlertStrategy_NotificationRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not more than one notification per `period`. + Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() { + *x = AlertPolicy_AlertStrategy_NotificationRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy_NotificationRateLimit.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +// Control over how the notification channels in `notification_channels` +// are notified when this alert fires, on a per-channel basis. +type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full REST resource name for the notification channels that these + // settings apply to. Each of these correspond to the name field in one + // of the NotificationChannel objects referenced in the + // notification_channels field of this AlertPolicy. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"` + // The frequency at which to send reminder notifications for open + // incidents. + RenotifyInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=renotify_interval,json=renotifyInterval,proto3" json:"renotify_interval,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() { + *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy_NotificationChannelStrategy.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 1} +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetNotificationChannelNames() []string { + if x != nil { + return x.NotificationChannelNames + } + return nil +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetRenotifyInterval() *durationpb.Duration { + if x != nil { + return x.RenotifyInterval + } + return nil +} + +var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x2b, 0x0a, + 0x0b, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x6f, 0x63, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4b, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, + 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, + 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x56, 0x0a, 0x0e, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 
0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x4b, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xf3, + 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, + 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, 0x0a, + 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x68, 0x72, 0x65, 
0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, + 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, + 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x1a, + 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, + 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, 0x20, + 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 
0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x23, + 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x45, + 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, + 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 
0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 
0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, + 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 
0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, + 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc +) + +func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData) + }) + return file_google_monitoring_v3_alert_proto_rawDescData +} + +var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_alert_proto_goTypes = []any{ + (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType + (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity + (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + (*AlertPolicy)(nil), // 3: google.monitoring.v3.AlertPolicy + (*AlertPolicy_Documentation)(nil), // 4: google.monitoring.v3.AlertPolicy.Documentation + (*AlertPolicy_Condition)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition + (*AlertPolicy_AlertStrategy)(nil), // 6: google.monitoring.v3.AlertPolicy.AlertStrategy + nil, // 7: google.monitoring.v3.AlertPolicy.UserLabelsEntry + (*AlertPolicy_Documentation_Link)(nil), // 8: google.monitoring.v3.AlertPolicy.Documentation.Link + (*AlertPolicy_Condition_Trigger)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.Trigger + 
(*AlertPolicy_Condition_MetricThreshold)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + (*AlertPolicy_Condition_MetricAbsence)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + (*AlertPolicy_Condition_LogMatch)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.LogMatch + (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + nil, // 16: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + nil, // 17: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*status.Status)(nil), // 21: google.rpc.Status + (*MutationRecord)(nil), // 22: google.monitoring.v3.MutationRecord + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (*Aggregation)(nil), // 24: google.monitoring.v3.Aggregation + (ComparisonType)(0), // 25: google.monitoring.v3.ComparisonType +} +var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ + 4, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation + 7, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry + 5, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition + 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType + 20, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue + 21, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status + 22, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord + 22, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord + 6, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy + 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity + 8, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link + 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch + 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + 14, // 15: 
google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + 18, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + 23, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration + 19, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + 24, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation + 24, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation + 15, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + 25, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType + 23, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration + 9, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 24, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation + 23, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration + 9, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 16, // 29: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + 23, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 9, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 23, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 23, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration + 17, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + 23, // 36: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration + 23, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration + 23, // 38: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> 
google.protobuf.Duration + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_proto_init() } +func file_google_monitoring_v3_alert_proto_init() { + if File_google_monitoring_v3_alert_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_Trigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_LogMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MonitoringQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_PrometheusQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold_ForecastOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationChannelStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + (*AlertPolicy_Condition_ConditionMatchedLog)(nil), + (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil), + (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil), + } + file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{ + (*AlertPolicy_Condition_Trigger_Count)(nil), + (*AlertPolicy_Condition_Trigger_Percent)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, + NumEnums: 3, + NumMessages: 17, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_alert_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_proto = out.File + file_google_monitoring_v3_alert_proto_rawDesc = nil + file_google_monitoring_v3_alert_proto_goTypes = nil + file_google_monitoring_v3_alert_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go new file mode 100644 index 000000000..f0e149d16 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -0,0 +1,1045 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/alert_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the alerting policy. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. |name| must be + // a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will + // return. The alerting policy that is returned will have a name that contains + // a normalized representation of this name as a prefix but adds a suffix of + // the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the + // container. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The requested alerting policy. You should omit the `name` field + // in this policy. The name will be returned in the new policy, including a + // new `[ALERT_POLICY_ID]` value. + AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` +} + +func (x *CreateAlertPolicyRequest) Reset() { + *x = CreateAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAlertPolicyRequest) ProtoMessage() {} + +func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if x != nil { + return x.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. 
+type GetAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAlertPolicyRequest) Reset() { + *x = GetAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAlertPolicyRequest) ProtoMessage() {} + +func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // alert policies are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListAlertPoliciesRequest) Reset() { + *x = ListAlertPoliciesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesRequest) ProtoMessage() {} + +func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead. +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListAlertPoliciesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListAlertPoliciesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of alert policies in all pages. This number is only an + // estimate, and may change in subsequent pages. https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListAlertPoliciesResponse) Reset() { + *x = ListAlertPoliciesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesResponse) ProtoMessage() {} + +func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead. 
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if x != nil { + return x.AlertPolicies + } + return nil +} + +func (x *ListAlertPoliciesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListAlertPoliciesResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // - The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // - Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. + AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` +} + +func (x *UpdateAlertPolicyRequest) Reset() { + *x = UpdateAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAlertPolicyRequest) ProtoMessage() {} + +func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if x != nil { + return x.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteAlertPolicyRequest) Reset() { + *x = DeleteAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteAlertPolicyRequest) ProtoMessage() {} + +func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x18, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac, 0x01, 0x0a, 0x19, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x61, 0x6c, 0x65, 0x72, + 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x18, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, + 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, + 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, + 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x22, 0x63, 0xda, + 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, + 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, + 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, + 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc +) + +func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData) + }) + return 
file_google_monitoring_v3_alert_service_proto_rawDescData +} + +var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_google_monitoring_v3_alert_service_proto_goTypes = []any{ + (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest + (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest + (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest + (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse + (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest + (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest + (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy + (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty +} +var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{ + 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy + 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest + 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest + 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest + 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest + 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest + 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse + 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty + 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 9, // [9:14] is the sub-list for method output_type + 4, // [4:9] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_service_proto_init() } +func file_google_monitoring_v3_alert_service_proto_init() { + if File_google_monitoring_v3_alert_service_proto != nil { + return + } + file_google_monitoring_v3_alert_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_service_proto = out.File + file_google_monitoring_v3_alert_service_proto_rawDesc = nil + file_google_monitoring_v3_alert_service_proto_goTypes = nil + file_google_monitoring_v3_alert_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. 
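// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the generated file.
// The stub defined below is used like any other gRPC client. Assuming a
// context.Context named ctx and a *grpc.ClientConn named conn (endpoint and
// credentials are the caller's choice), listing the policies of a
// hypothetical project "my-project" might look like:
//
//	client := NewAlertPolicyServiceClient(conn)
//	resp, err := client.ListAlertPolicies(ctx, &ListAlertPoliciesRequest{
//	    Name: "projects/my-project",
//	})
//	if err != nil {
//	    // handle the RPC error
//	}
//	for _, policy := range resp.GetAlertPolicies() {
//	    _ = policy // inspect each *AlertPolicy
//	}
//
// Most applications reach these methods through the higher-level
// cloud.google.com/go/monitoring client rather than calling the stub directly.
// ---------------------------------------------------------------------------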
+ // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. 
This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations. +type UnimplementedAlertPolicyServiceServer struct { +} + +func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented") +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go 
b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go new file mode 100644 index 000000000..c9aa5a024 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -0,0 +1,1165 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/common.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + distribution "google.golang.org/genproto/googleapis/api/distribution" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies an ordering relationship on two arguments, called `left` and +// `right`. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // True if the left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // True if the left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // True if the left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // True if the left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // True if the left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // True if the left argument is not equal to the right argument. + ComparisonType_COMPARISON_NE ComparisonType = 6 +) + +// Enum value maps for ComparisonType. 
+var ( + ComparisonType_name = map[int32]string{ + 0: "COMPARISON_UNSPECIFIED", + 1: "COMPARISON_GT", + 2: "COMPARISON_GE", + 3: "COMPARISON_LT", + 4: "COMPARISON_LE", + 5: "COMPARISON_EQ", + 6: "COMPARISON_NE", + } + ComparisonType_value = map[string]int32{ + "COMPARISON_UNSPECIFIED": 0, + "COMPARISON_GT": 1, + "COMPARISON_GE": 2, + "COMPARISON_LT": 3, + "COMPARISON_LE": 4, + "COMPARISON_EQ": 5, + "COMPARISON_NE": 6, + } +) + +func (x ComparisonType) Enum() *ComparisonType { + p := new(ComparisonType) + *p = x + return p +} + +func (x ComparisonType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ComparisonType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor() +} + +func (ComparisonType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[0] +} + +func (x ComparisonType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ComparisonType.Descriptor instead. +func (ComparisonType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} +} + +// The tier of service for a Metrics Scope. Please see the +// [service tiers +// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more +// details. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/common.proto. +type ServiceTier int32 + +const ( + // An invalid sentinel value, used to indicate that a tier has not + // been provided explicitly. + ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 + // The Cloud Monitoring Basic tier, a free tier of service that provides basic + // features, a moderate allotment of logs, and access to built-in metrics. + // A number of features are not available in this tier. For more details, + // see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 + // The Cloud Monitoring Premium tier, a higher, more expensive tier of service + // that provides access to all Cloud Monitoring features, lets you use Cloud + // Monitoring with AWS accounts, and has a larger allotments for logs and + // metrics. For more details, see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 +) + +// Enum value maps for ServiceTier. +var ( + ServiceTier_name = map[int32]string{ + 0: "SERVICE_TIER_UNSPECIFIED", + 1: "SERVICE_TIER_BASIC", + 2: "SERVICE_TIER_PREMIUM", + } + ServiceTier_value = map[string]int32{ + "SERVICE_TIER_UNSPECIFIED": 0, + "SERVICE_TIER_BASIC": 1, + "SERVICE_TIER_PREMIUM": 2, + } +) + +func (x ServiceTier) Enum() *ServiceTier { + p := new(ServiceTier) + *p = x + return p +} + +func (x ServiceTier) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceTier) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor() +} + +func (ServiceTier) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[1] +} + +func (x ServiceTier) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceTier.Descriptor instead. 
+func (ServiceTier) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +// The `Aligner` specifies the operation that will be applied to the data +// points in each alignment period in a time series. Except for +// `ALIGN_NONE`, which specifies that no operation be applied, each alignment +// operation replaces the set of data values in each alignment period with +// a single value: the result of applying the operation to the data values. +// An aligned time series has a single data value at the end of each +// `alignment_period`. +// +// An alignment operation can change the data type of the values, too. For +// example, if you apply a counting operation to boolean values, the data +// `value_type` in the original time series is `BOOLEAN`, but the `value_type` +// in the aligned result is `INT64`. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-series reduction + // is requested. The `value_type` of the result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA]. + // The output is `delta = y1 - y0`. + // + // This alignment is valid for + // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and + // `DELTA` metrics. If the selected alignment period results in periods + // with no data, then the aligned value for such a period is created by + // interpolation. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. The result is computed as + // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time". + // Think of this aligner as providing the slope of the line that passes + // through the value at the start and at the end of the `alignment_period`. + // + // This aligner is valid for `CUMULATIVE` + // and `DELTA` metrics with numeric values. If the selected alignment + // period results in periods with no data, then the aligned value for + // such a period is created by interpolation. The output is a `GAUGE` + // metric with `value_type` `DOUBLE`. + // + // If, by "rate", you mean "percentage change", see the + // `ALIGN_PERCENT_CHANGE` aligner instead. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the alignment + // period boundary. This aligner is valid for `GAUGE` metrics with + // numeric values. The `value_type` of the aligned result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by moving the most recent data point before the end of the + // alignment period to the boundary at the end of the alignment + // period. This aligner is valid for `GAUGE` metrics. The `value_type` of + // the aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align the time series by returning the minimum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align the time series by returning the maximum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. 
The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align the time series by returning the mean value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is `DOUBLE`. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align the time series by returning the number of values in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric or Boolean values. The `value_type` of the aligned result is + // `INT64`. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align the time series by returning the sum of the values in each + // alignment period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with numeric and distribution values. The `value_type` of the + // aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align the time series by returning the standard deviation of the values + // in each alignment period. This aligner is valid for `GAUGE` and + // `DELTA` metrics with numeric values. The `value_type` of the output is + // `DOUBLE`. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align the time series by returning the number of `True` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align the time series by returning the number of `False` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align the time series by returning the ratio of the number of `True` + // values to the total number of values in each alignment period. This + // aligner is valid for `GAUGE` metrics with Boolean values. The output + // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 99th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 95th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 50th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. 
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 5th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This aligner is valid for + // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns + // `((current - previous)/previous) * 100`, where the value of `previous` is + // determined based on the `alignment_period`. + // + // If the values of `current` and `previous` are both 0, then the returned + // value is 0. If only `previous` is 0, the returned value is infinity. + // + // A 10-minute moving mean is computed at each point of the alignment period + // prior to the above calculation to smooth the metric and prevent false + // positives from very short-lived spikes. The moving mean is only + // applicable for data whose values are `>= 0`. Any values `< 0` are + // treated as a missing datapoint, and are ignored. While `DELTA` + // metrics are accepted by this alignment, special care should be taken that + // the values for the metric will always be positive. The output is a + // `GAUGE` metric with `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +// Enum value maps for Aggregation_Aligner. +var ( + Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", + } + Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, + } +) + +func (x Aggregation_Aligner) Enum() *Aggregation_Aligner { + p := new(Aggregation_Aligner) + *p = x + return p +} + +func (x Aggregation_Aligner) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor() +} + +func (Aggregation_Aligner) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[2] +} + +func (x Aggregation_Aligner) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Aligner.Descriptor instead. 
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0} +} + +// A Reducer operation describes how to aggregate data points from multiple +// time series into a single time series, where the value of each data point +// in the resulting series is a function of all the already aligned values in +// the input time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the `Aligner` is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean value across time series for each + // alignment period. This reducer is valid for + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and + // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with + // numeric or distribution values. The `value_type` of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric and distribution values. The `value_type` of the output is + // the same as the `value_type` of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics with numeric or distribution values. The `value_type` + // of the output is `DOUBLE`. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the number of data points across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of numeric, Boolean, distribution, and string + // `value_type`. The `value_type` of the output is `INT64`. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the number of `True`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the number of `False`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the ratio of the number of `True`-valued data points + // to the total number of data points for each alignment period. This + // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`. + // The output value is in the range [0.0, 1.0] and has `value_type` + // `DOUBLE`. 
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing the [99th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing the [95th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing the [50th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing the [5th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +// Enum value maps for Aggregation_Reducer. +var ( + Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", + } + Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, + } +) + +func (x Aggregation_Reducer) Enum() *Aggregation_Reducer { + p := new(Aggregation_Reducer) + *p = x + return p +} + +func (x Aggregation_Reducer) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor() +} + +func (Aggregation_Reducer) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[3] +} + +func (x Aggregation_Reducer) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Reducer.Descriptor instead. +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1} +} + +// A single strongly-typed value. +type TypedValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The typed value field. 
+ // + // Types that are assignable to Value: + // + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` +} + +func (x *TypedValue) Reset() { + *x = TypedValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedValue) ProtoMessage() {} + +func (x *TypedValue) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead. +func (*TypedValue) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *TypedValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *TypedValue) GetInt64Value() int64 { + if x, ok := x.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *TypedValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *TypedValue) GetStringValue() string { + if x, ok := x.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *TypedValue) GetDistributionValue() *distribution.Distribution { + if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + // A Boolean value: `true` or `false`. + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type TypedValue_Int64Value struct { + // A 64-bit integer. Its range is approximately ±9.2x1018. + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type TypedValue_DoubleValue struct { + // A 64-bit double-precision floating-point number. Its magnitude + // is approximately ±10±300 and it has 16 + // significant digits of precision. + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type TypedValue_StringValue struct { + // A variable-length string value. + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type TypedValue_DistributionValue struct { + // A distribution value. + DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} + +func (*TypedValue_Int64Value) isTypedValue_Value() {} + +func (*TypedValue_DoubleValue) isTypedValue_Value() {} + +func (*TypedValue_StringValue) isTypedValue_Value() {} + +func (*TypedValue_DistributionValue) isTypedValue_Value() {} + +// Describes a time interval: +// +// - Reads: A half-open time interval. 
It includes the end time but +// excludes the start time: `(startTime, endTime]`. The start time +// must be specified, must be earlier than the end time, and should be +// no older than the data retention period for the metric. +// - Writes: A closed time interval. It extends from the start time to the end +// time, +// and includes both: `[startTime, endTime]`. Valid time intervals +// depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. The end time must not be earlier than the start +// time, and the end time must not be more than 25 hours in the past or more +// than five minutes in the future. +// - For `GAUGE` metrics, the `startTime` value is technically optional; if +// no value is specified, the start time defaults to the value of the +// end time, and the interval represents a single point in time. If both +// start and end times are specified, they must be identical. Such an +// interval is valid only for `GAUGE` metrics, which are point-in-time +// measurements. The end time of a new interval must be at least a +// millisecond after the end time of the previous interval. +// - For `DELTA` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying contiguous and +// non-overlapping intervals. For `DELTA` metrics, the start time of +// the next interval must be at least a millisecond after the end time +// of the previous interval. +// - For `CUMULATIVE` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying the same +// start time and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. The new start time must be at least a millisecond after the +// end time of the previous interval. +// - The start time of a new interval must be at least a millisecond after +// the +// end time of the previous interval because intervals are closed. If the +// start time of a new interval is the same as the end time of the +// previous interval, then data written at the new start time could +// overwrite data written at the previous end time. +type TimeInterval struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The end of the time interval. + EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. 
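// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the generated file.
// Per the interval rules above, a GAUGE write may collapse to a single point
// in time, so a writer can reuse one timestamp for both ends (timestamppb is
// already imported by this file):
//
//	now := timestamppb.Now()
//	interval := &TimeInterval{
//	    StartTime: now, // optional for GAUGE points; defaults to EndTime
//	    EndTime:   now,
//	}
// ---------------------------------------------------------------------------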
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` +} + +func (x *TimeInterval) Reset() { + *x = TimeInterval{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeInterval) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeInterval) ProtoMessage() {} + +func (x *TimeInterval) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead. +func (*TimeInterval) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide a different view of +// the data. Aggregation of time series is done in two steps. First, each time +// series in the set is _aligned_ to the same time interval boundaries, then the +// set of time series is optionally _reduced_ in number. +// +// Alignment consists of applying the `per_series_aligner` operation +// to each time series after its data has been divided into regular +// `alignment_period` time intervals. This process takes _all_ of the data +// points in an alignment period, applies a mathematical transformation such as +// averaging, minimum, maximum, delta, etc., and converts them into a single +// data point per period. +// +// Reduction is when the aligned and transformed time series can optionally be +// combined, reducing the number of time series through similar mathematical +// transformations. Reduction involves applying a `cross_series_reducer` to +// all the time series, optionally sorting the time series into subsets with +// `group_by_fields`, and applying the reducer to each subset. +// +// The raw time series data can contain a huge amount of information from +// multiple sources. Alignment and reduction transforms this mass of data into +// a more manageable and representative collection of data, for example "the +// 95% latency across the average of all tasks in a cluster". This +// representative data can be more easily graphed and comprehended, and the +// individual time series data is still available for later drilldown. For more +// details, see [Filtering and +// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation). +type Aggregation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `alignment_period` specifies a time interval, in seconds, that is used + // to divide the data in all the + // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + // time. This will be done before the per-series aligner can be applied to + // the data. + // + // The value must be at least 60 seconds. If a per-series + // aligner other than `ALIGN_NONE` is specified, this field is required or an + // error is returned. 
If no per-series aligner is specified, or the aligner + // `ALIGN_NONE` is specified, then this field is ignored. + // + // The maximum value of the `alignment_period` is 104 weeks (2 years) for + // charts, and 90,000 seconds (25 hours) for alerting policies. + AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // An `Aligner` describes how to bring the data points in a single + // time series into temporal alignment. Except for `ALIGN_NONE`, all + // alignments cause all the data points in an `alignment_period` to be + // mathematically grouped together, resulting in a single data point for + // each `alignment_period` with end timestamp at the end of the period. + // + // Not all alignment operations may be applied to all time series. The valid + // choices depend on the `metric_kind` and `value_type` of the original time + // series. Alignment can change the `metric_kind` or the `value_type` of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `cross_series_reducer` is specified, then + // `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + // and `alignment_period` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The reduction operation to be used to combine time series into a single + // time series, where the value of each data point in the resulting series is + // a function of all the already aligned values in the input time series. + // + // Not all reducer operations can be applied to all time series. The valid + // choices depend on the `metric_kind` and the `value_type` of the original + // time series. Reduction can yield a time series with a different + // `metric_kind` or `value_type` than the input time series. + // + // Time series data must first be aligned (see `per_series_aligner`) in order + // to perform cross-time series reduction. If `cross_series_reducer` is + // specified, then `per_series_aligner` must be specified, and must not be + // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + // error is returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `cross_series_reducer` is + // specified. The `group_by_fields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // operation. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `cross_series_reducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `group_by_fields` are aggregated away. If + // `group_by_fields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `cross_series_reducer` is not + // defined, this field is ignored. 
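// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the generated file.
// Putting the fields documented above together, a "per-zone rate, summed
// across tasks" view of a DELTA or CUMULATIVE metric could be expressed as
// follows (the zone label name is only an example; durationpb is already
// imported by this file):
//
//	agg := &Aggregation{
//	    // Split each input series into 300-second blocks (minimum is 60s).
//	    AlignmentPeriod: &durationpb.Duration{Seconds: 300},
//	    // Turn the counts in each block into a rate (GAUGE/DOUBLE output).
//	    PerSeriesAligner: Aggregation_ALIGN_RATE,
//	    // Sum the aligned series within each group into one output series.
//	    CrossSeriesReducer: Aggregation_REDUCE_SUM,
//	    // One output series per zone; unlisted labels are aggregated away.
//	    GroupByFields: []string{"resource.label.zone"},
//	}
// ---------------------------------------------------------------------------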
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` +} + +func (x *Aggregation) Reset() { + *x = Aggregation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Aggregation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Aggregation) ProtoMessage() {} + +func (x *Aggregation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead. +func (*Aggregation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration { + if x != nil { + return x.AlignmentPeriod + } + return nil +} + +func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if x != nil { + return x.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if x != nil { + return x.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (x *Aggregation) GetGroupByFields() []string { + if x != nil { + return x.GroupByFields + } + return nil +} + +var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, + 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 
0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54, + 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65, + 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07, + 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, + 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, + 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 
0x0e, + 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15, + 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c, + 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, + 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, + 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, + 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, + 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, + 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13, + 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, + 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22, + 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a, + 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, + 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x39, 0x39, 
0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, + 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, + 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30, + 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, + 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, + 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, + 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, + 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, + 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, + 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11, + 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10, + 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, + 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, + 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, + 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, + 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, + 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_common_proto_rawDescOnce sync.Once + file_google_monitoring_v3_common_proto_rawDescData = 
file_google_monitoring_v3_common_proto_rawDesc +) + +func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData) + }) + return file_google_monitoring_v3_common_proto_rawDescData +} + +var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_monitoring_v3_common_proto_goTypes = []any{ + (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType + (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier + (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner + (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer + (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue + (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation + (*distribution.Distribution)(nil), // 7: google.api.Distribution + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_google_monitoring_v3_common_proto_depIdxs = []int32{ + 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution + 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp + 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp + 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration + 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner + 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_common_proto_init() } +func file_google_monitoring_v3_common_proto_init() { + if File_google_monitoring_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*TypedValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeInterval); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Aggregation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_common_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_common_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_common_proto_msgTypes, + }.Build() + File_google_monitoring_v3_common_proto = out.File + file_google_monitoring_v3_common_proto_rawDesc = nil + file_google_monitoring_v3_common_proto_goTypes = nil + file_google_monitoring_v3_common_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go new file mode 100644 index 000000000..7b1dc962d --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -0,0 +1,197 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/dropped_labels.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A set of (label, value) pairs that were removed from a Distribution +// time series during aggregation and then added as an attachment to a +// Distribution.Exemplar. +// +// The full label set for the exemplars is constructed by using the dropped +// pairs in combination with the label values that remain on the aggregated +// Distribution time series. The constructed full label set can be used to +// identify the specific entity, such as the instance or job, which might be +// contributing to a long-tail. However, with dropped labels, the storage +// requirements are reduced because only the aggregated distribution values for +// a large group of time series are stored. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map from label to its value, for all labels dropped in any aggregation. 
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DroppedLabels) Reset() { + *x = DroppedLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DroppedLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DroppedLabels) ProtoMessage() {} + +func (x *DroppedLabels) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead. +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0} +} + +func (x *DroppedLabels) GetLabel() map[string]string { + if x != nil { + return x.Label + } + return nil +} + +var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 
0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once + file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc +) + +func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData) + }) + return file_google_monitoring_v3_dropped_labels_proto_rawDescData +} + +var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{ + (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels + nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry +} +var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_dropped_labels_proto_init() } +func file_google_monitoring_v3_dropped_labels_proto_init() { + if File_google_monitoring_v3_dropped_labels_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DroppedLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes, + }.Build() + File_google_monitoring_v3_dropped_labels_proto = out.File + file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil + file_google_monitoring_v3_dropped_labels_proto_goTypes = nil + file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go new file mode 100644 index 000000000..dff27f9d8 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -0,0 +1,265 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The name of this group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `[GROUP_ID]` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // For groups with no parent, `parent_name` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this + // group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. + IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. +func (*Group) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0} +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Group) GetParentName() string { + if x != nil { + return x.ParentName + } + return "" +} + +func (x *Group) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *Group) GetIsCluster() bool { + if x != nil { + return x.IsCluster + } + return false +} + +var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 
0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea, + 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, + 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc +) + +func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData) + }) + return file_google_monitoring_v3_group_proto_rawDescData +} + +var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_group_proto_goTypes = []any{ + (*Group)(nil), // 0: google.monitoring.v3.Group +} +var file_google_monitoring_v3_group_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_proto_init() } +func file_google_monitoring_v3_group_proto_init() { + if File_google_monitoring_v3_group_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_group_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_proto = out.File + file_google_monitoring_v3_group_proto_rawDesc = nil + file_google_monitoring_v3_group_proto_goTypes = nil + file_google_monitoring_v3_group_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go new file mode 100644 index 000000000..46747d906 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -0,0 +1,1319 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListGroup` request. +type ListGroupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // groups are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit + // the groups returned based on their parent-child relationship with the + // specified group. If no filter is specified, all groups are returned. 
+ // + // Types that are assignable to Filter: + // + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListGroupsRequest) Reset() { + *x = ListGroupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsRequest) ProtoMessage() {} + +func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead. +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListGroupsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (x *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups whose `parent_name` field contains the group + // name. If no groups have this parent, the results are empty. + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups that are ancestors of the specified group. + // The groups are returned in order, starting with the immediate parent and + // ending with the most distant ancestor. 
If the specified group has no + // immediate parent, the results are empty. + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns the descendants of the specified group. This is a superset of + // the results returned by the `children_of_group` filter, and includes + // children-of-children, and so forth. + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +// The `ListGroups` response. +type ListGroupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListGroupsResponse) Reset() { + *x = ListGroupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsResponse) ProtoMessage() {} + +func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead. +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListGroupsResponse) GetGroup() []*Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *ListGroupsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to retrieve. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetGroupRequest) Reset() { + *x = GetGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGroupRequest) ProtoMessage() {} + +func (x *GetGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead. +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Required. A group definition. It is an error to define the `name` field + // because the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *CreateGroupRequest) Reset() { + *x = CreateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateGroupRequest) ProtoMessage() {} + +func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead. +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *CreateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The new definition of the group. 
All fields of the existing + // group, excepting `name`, are replaced with the corresponding fields of this + // group. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *UpdateGroupRequest) Reset() { + *x = UpdateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateGroupRequest) ProtoMessage() {} + +func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead. +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *UpdateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. The default behavior is to be able to delete a +// single group without any descendants. +type DeleteGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If this field is true, then the request means to delete a group with all + // its descendants. Otherwise, the request means to delete a group only when + // it has no descendants. The default value is false. + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *DeleteGroupRequest) Reset() { + *x = DeleteGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteGroupRequest) ProtoMessage() {} + +func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead. +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteGroupRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +// The `ListGroupMembers` request. 
+type ListGroupMembersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group whose members are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list + // filter](https://cloud.google.com/monitoring/api/learn_more#filtering) + // describing the members to be returned. The filter may reference the type, + // labels, and metadata of monitored resources that comprise the group. For + // example, to return only resources representing Compute Engine VM instances, + // use this filter: + // + // `resource.type = "gce_instance"` + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. + Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` +} + +func (x *ListGroupMembersRequest) Reset() { + *x = ListGroupMembersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersRequest) ProtoMessage() {} + +func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead. +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListGroupMembersRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListGroupMembersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupMembersRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListGroupMembersRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListGroupMembersRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of monitored resources in the group. 
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListGroupMembersResponse) Reset() { + *x = ListGroupMembersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersResponse) ProtoMessage() {} + +func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead. +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if x != nil { + return x.Members + } + return nil +} + +func (x *ListGroupMembersResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListGroupMembersResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x6d, 0x6f, 0x6e, 
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, + 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f, + 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, + 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65, + 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64, + 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 
0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01, + 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, + 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, + 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0xda, + 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x25, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b, + 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x3a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0b, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x10, + 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x35, 0xda, 0x41, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc +) + +func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData) + }) + return file_google_monitoring_v3_group_service_proto_rawDescData +} + +var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_group_service_proto_goTypes = []any{ + (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest + (*ListGroupsResponse)(nil), // 1: 
google.monitoring.v3.ListGroupsResponse + (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest + (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest + (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest + (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest + (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest + (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse + (*Group)(nil), // 8: google.monitoring.v3.Group + (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval + (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group + 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group + 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group + 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource + 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest + 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest + 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest + 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest + 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest + 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest + 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse + 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group + 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group + 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group + 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_service_proto_init() } +func file_google_monitoring_v3_group_service_proto_init() { + if File_google_monitoring_v3_group_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_group_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_group_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_service_proto = out.File + file_google_monitoring_v3_group_service_proto_rawDesc = nil + file_google_monitoring_v3_group_service_proto_goTypes = nil + file_google_monitoring_v3_group_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. 
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. 
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations. +type UnimplementedGroupServiceServer struct { +} + +func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented") +} +func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented") +} +func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") +} +func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented") +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go new file mode 100644 index 000000000..b22c22d07 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -0,0 +1,1194 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + label "google.golang.org/genproto/googleapis/api/label" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A single data point in a time series. +type Point struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time interval to which the data point applies. For `GAUGE` metrics, + // the start time is optional, but if it is supplied, it must equal the + // end time. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Point) Reset() { + *x = Point{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Point) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Point) ProtoMessage() {} + +func (x *Point) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Point.ProtoReflect.Descriptor instead. +func (*Point) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *Point) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Point) GetValue() *TypedValue { + if x != nil { + return x.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. 
A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. Custom metrics can use only certain + // monitored resource types in their time series data. For more information, + // see [Monitored resources for custom + // metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources). + Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Output only. The associated monitored resource metadata. When reading a + // time series, this field will include metadata labels that are explicitly + // named in the reduction. When creating a time series, this field is ignored. + Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, points are + // returned in reverse time order. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. + Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. 
+ Unit string `protobuf:"bytes,8,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeries) Reset() { + *x = TimeSeries{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeries) ProtoMessage() {} + +func (x *TimeSeries) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. +func (*TimeSeries) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeSeries) GetMetric() *metric.Metric { + if x != nil { + return x.Metric + } + return nil +} + +func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeries) GetPoints() []*Point { + if x != nil { + return x.Points + } + return nil +} + +func (x *TimeSeries) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A descriptor for the labels and points in a time series. +type TimeSeriesDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Descriptors for the labels. + LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"` + // Descriptors for the point data value columns. + PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"` +} + +func (x *TimeSeriesDescriptor) Reset() { + *x = TimeSeriesDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2} +} + +func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor { + if x != nil { + return x.LabelDescriptors + } + return nil +} + +func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor { + if x != nil { + return x.PointDescriptors + } + return nil +} + +// Represents the values of a time series associated with a +// TimeSeriesDescriptor. +type TimeSeriesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values of the labels in the time series identifier, given in the same + // order as the `label_descriptors` field of the TimeSeriesDescriptor + // associated with this object. Each value must have a value of the type + // given in the corresponding entry of `label_descriptors`. + LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The points in the time series. + PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"` +} + +func (x *TimeSeriesData) Reset() { + *x = TimeSeriesData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData) ProtoMessage() {} + +func (x *TimeSeriesData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead. +func (*TimeSeriesData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3} +} + +func (x *TimeSeriesData) GetLabelValues() []*LabelValue { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData { + if x != nil { + return x.PointData + } + return nil +} + +// A label value. +type LabelValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label value can be a bool, int64, or string. 
+ // + // Types that are assignable to Value: + // + // *LabelValue_BoolValue + // *LabelValue_Int64Value + // *LabelValue_StringValue + Value isLabelValue_Value `protobuf_oneof:"value"` +} + +func (x *LabelValue) Reset() { + *x = LabelValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValue) ProtoMessage() {} + +func (x *LabelValue) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead. +func (*LabelValue) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4} +} + +func (m *LabelValue) GetValue() isLabelValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *LabelValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*LabelValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *LabelValue) GetInt64Value() int64 { + if x, ok := x.GetValue().(*LabelValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *LabelValue) GetStringValue() string { + if x, ok := x.GetValue().(*LabelValue_StringValue); ok { + return x.StringValue + } + return "" +} + +type isLabelValue_Value interface { + isLabelValue_Value() +} + +type LabelValue_BoolValue struct { + // A bool label value. + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type LabelValue_Int64Value struct { + // An int64 label value. + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type LabelValue_StringValue struct { + // A string label value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +func (*LabelValue_BoolValue) isLabelValue_Value() {} + +func (*LabelValue_Int64Value) isLabelValue_Value() {} + +func (*LabelValue_StringValue) isLabelValue_Value() {} + +// An error associated with a query in the time series query language format. +type QueryError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the time series query language text that this error applies + // to. + Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"` + // The error message. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *QueryError) Reset() { + *x = QueryError{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryError) ProtoMessage() {} + +func (x *QueryError) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryError.ProtoReflect.Descriptor instead. +func (*QueryError) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5} +} + +func (x *QueryError) GetLocator() *TextLocator { + if x != nil { + return x.Locator + } + return nil +} + +func (x *QueryError) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A locator for text. Indicates a particular part of the text of a request or +// of an object referenced in the request. +// +// For example, suppose the request field `text` contains: +// +// text: "The quick brown fox jumps over the lazy dog." +// +// Then the locator: +// +// source: "text" +// start_position { +// line: 1 +// column: 17 +// } +// end_position { +// line: 1 +// column: 19 +// } +// +// refers to the part of the text: "fox". +type TextLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The source of the text. The source may be a field in the request, in which + // case its format is the format of the + // google.rpc.BadRequest.FieldViolation.field field in + // https://cloud.google.com/apis/design/errors#error_details. It may also be + // be a source other than the request field (e.g. a macro definition + // referenced in the text of the query), in which case this is the name of + // the source (e.g. the macro name). + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + // The position of the first byte within the text. + StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"` + // The position of the last byte within the text. + EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"` + // If `source`, `start_position`, and `end_position` describe a call on + // some object (e.g. a macro in the time series query language text) and a + // location is to be designated in that object's text, `nested_locator` + // identifies the location within that object. + NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"` + // When `nested_locator` is set, this field gives the reason for the nesting. + // Usually, the reason is a macro invocation. In that case, the macro name + // (including the leading '@') signals the location of the macro call + // in the text and a macro argument name (including the leading '$') signals + // the location of the macro argument inside the macro body that got + // substituted away. 
+ NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"` +} + +func (x *TextLocator) Reset() { + *x = TextLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator) ProtoMessage() {} + +func (x *TextLocator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead. +func (*TextLocator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6} +} + +func (x *TextLocator) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *TextLocator) GetStartPosition() *TextLocator_Position { + if x != nil { + return x.StartPosition + } + return nil +} + +func (x *TextLocator) GetEndPosition() *TextLocator_Position { + if x != nil { + return x.EndPosition + } + return nil +} + +func (x *TextLocator) GetNestedLocator() *TextLocator { + if x != nil { + return x.NestedLocator + } + return nil +} + +func (x *TextLocator) GetNestingReason() string { + if x != nil { + return x.NestingReason + } + return "" +} + +// A descriptor for the value columns in a data point. +type TimeSeriesDescriptor_ValueDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value type. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The value stream kind. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The unit in which `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // `unit` is only valid if `value_type` is INTEGER, DOUBLE, DISTRIBUTION. 
+ Unit string `protobuf:"bytes,4,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() { + *x = TimeSeriesDescriptor_ValueDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead. +func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A point's value columns and time interval. Each point has one or more +// point values corresponding to the entries in `point_descriptors` field in +// the TimeSeriesDescriptor associated with this object. +type TimeSeriesData_PointData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values that make up the point. + Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + // The time interval associated with the point. + TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"` +} + +func (x *TimeSeriesData_PointData) Reset() { + *x = TimeSeriesData_PointData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData_PointData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData_PointData) ProtoMessage() {} + +func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *TimeSeriesData_PointData) GetValues() []*TypedValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval { + if x != nil { + return x.TimeInterval + } + return nil +} + +// The position of a byte within the text. +type TextLocator_Position struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The line, starting with 1, where the byte is positioned. + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + // The column within the line, starting with 1, where the byte is + // positioned. This is a byte index even though the text is UTF-8. + Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"` +} + +func (x *TextLocator_Position) Reset() { + *x = TextLocator_Position{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator_Position) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator_Position) ProtoMessage() {} + +func (x *TextLocator_Position) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead. +func (*TextLocator_Position) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *TextLocator_Position) GetLine() int32 { + if x != nil { + return x.Line + } + return 0 +} + +func (x *TextLocator_Position) GetColumn() int32 { + if x != nil { + return x.Column + } + return 0 +} + +var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, + 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x94, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x48, 0x0a, 0x11, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x10, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x48, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb5, 0x02, + 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 
0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf0, 0x02, 0x0a, 0x0b, 0x54, + 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, + 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, + 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 
0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0xc6, 0x01, + 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, + 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc +) + +func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_proto_rawDescData +} + +var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_google_monitoring_v3_metric_proto_goTypes = []any{ + (*Point)(nil), // 0: google.monitoring.v3.Point + (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries + (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData + (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue + (*QueryError)(nil), // 5: google.monitoring.v3.QueryError + (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator + (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData + (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position + (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval + (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue + (*metric.Metric)(nil), // 12: google.api.Metric + (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource + (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata + (metric.MetricDescriptor_MetricKind)(0), // 15: 
google.api.MetricDescriptor.MetricKind + (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType + (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor +} +var file_google_monitoring_v3_metric_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval + 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue + 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric + 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource + 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata + 15, // 5: google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType + 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point + 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor + 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue + 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData + 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator + 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position + 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position + 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator + 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue + 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_proto_init() } +func file_google_monitoring_v3_metric_proto_init() { + if File_google_monitoring_v3_metric_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Point); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeries); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v any, 
i int) any { + switch v := v.(*TimeSeriesDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*LabelValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*QueryError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData_PointData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator_Position); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{ + (*LabelValue_BoolValue)(nil), + (*LabelValue_Int64Value)(nil), + (*LabelValue_StringValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_metric_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_proto = out.File + file_google_monitoring_v3_metric_proto_rawDesc = nil + file_google_monitoring_v3_metric_proto_goTypes = nil + file_google_monitoring_v3_metric_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go new file mode 100644 index 000000000..52e1c1e0b --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -0,0 +1,2502 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Controls which fields are returned by `ListTimeSeries*`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. + ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +// Enum value maps for ListTimeSeriesRequest_TimeSeriesView. +var ( + ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", + } + ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, + } +) + +func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView { + p := new(ListTimeSeriesRequest_TimeSeriesView) + *p = x + return p +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor() +} + +func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_metric_service_proto_enumTypes[0] +} + +func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead. +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) + // describing the descriptors to be returned. The filter can reference the + // descriptor's type and labels. For example, the following filter returns + // only Google Compute Engine descriptors that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsRequest) Reset() { + *x = ListMonitoredResourceDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. 
To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsResponse) Reset() { + *x = ListMonitoredResourceDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if x != nil { + return x.ResourceDescriptors + } + return nil +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource descriptor to get. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + // + // The `[RESOURCE_TYPE]` is a predefined type, such as + // `cloudsql_database`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMonitoredResourceDescriptorRequest) Reset() { + *x = GetMonitoredResourceDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMonitoredResourceDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} + +func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetMonitoredResourceDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListMetricDescriptors` request. 
+type ListMetricDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. The + // default and maximum value is 10,000. If a page_size <= 0 or > 10,000 is + // submitted, will instead return a maximum of 10,000 results. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListMetricDescriptorsRequest) Reset() { + *x = ListMetricDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMetricDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMetricDescriptorsRequest) ProtoMessage() {} + +func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListMetricDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListMetricDescriptorsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListMetricDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListMetricDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. +type ListMetricDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. 
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMetricDescriptorsResponse) Reset() { + *x = ListMetricDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMetricDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMetricDescriptorsResponse) ProtoMessage() {} + +func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { + if x != nil { + return x.MetricDescriptors + } + return nil +} + +func (x *ListMetricDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The metric descriptor on which to execute the request. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example value of `[METRIC_ID]` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetricDescriptorRequest) Reset() { + *x = GetMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetricDescriptorRequest) ProtoMessage() {} + +func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. 
+type CreateMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // 4 + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new [custom + // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor. + MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` +} + +func (x *CreateMetricDescriptorRequest) Reset() { + *x = CreateMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateMetricDescriptorRequest) ProtoMessage() {} + +func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead. +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { + if x != nil { + return x.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The metric descriptor on which to execute the request. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example of `[METRIC_ID]` is: + // `"custom.googleapis.com/my_test_metric"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteMetricDescriptorRequest) Reset() { + *x = DeleteMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} + +func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListTimeSeries` request. +type ListTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name), + // organization or folder on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // organizations/[ORGANIZATION_ID] + // folders/[FOLDER_ID] + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + // Required. A [monitoring + // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies + // which time series should be returned. The filter must specify a single + // metric type, and can additionally specify metric labels and other + // information. For example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.labels.instance_name = "my-instance-name" + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Required. The time interval for which results should be returned. Only time + // series that contain data points in the specified interval are included in + // the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series across specified labels. + // + // By default (if no `aggregation` is explicitly specified), the raw time + // series data is returned. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` + // Apply a second aggregation after `aggregation` is applied. May only be + // specified if `aggregation` is specified. + SecondaryAggregation *Aggregation `protobuf:"bytes,11,opt,name=secondary_aggregation,json=secondaryAggregation,proto3" json:"secondary_aggregation,omitempty"` + // Unsupported: must be left blank. The points in each time series are + // currently returned in reverse time order (most recent to oldest). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Required. Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. If + // `page_size` is empty or more than 100,000 results, the effective + // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the + // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is + // the maximum number of `TimeSeries` returned. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTimeSeriesRequest) Reset() { + *x = ListTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesRequest) ProtoMessage() {} + +func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListTimeSeriesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if x != nil { + return x.Aggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetSecondaryAggregation() *Aggregation { + if x != nil { + return x.SecondaryAggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if x != nil { + return x.View + } + return ListTimeSeriesRequest_FULL +} + +func (x *ListTimeSeriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTimeSeriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. + ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + // The unit in which all `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // If different `time_series` have different units (for example, because they + // come from different metric types, or a unit is absent), then `unit` will be + // "{not_a_unit}". 
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *ListTimeSeriesResponse) Reset() { + *x = ListTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesResponse) ProtoMessage() {} + +func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *ListTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if x != nil { + return x.ExecutionErrors + } + return nil +} + +func (x *ListTimeSeriesResponse) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. + // + // The maximum number of `TimeSeries` objects per `Create` request is 200. + TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` +} + +func (x *CreateTimeSeriesRequest) Reset() { + *x = CreateTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesRequest) ProtoMessage() {} + +func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead. 
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10} +} + +func (x *CreateTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// DEPRECATED. Used to hold per-time-series error status. +type CreateTimeSeriesError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DEPRECATED. Time series ID that resulted in the `status` error. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // DEPRECATED. The status of the requested write operation for `time_series`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *CreateTimeSeriesError) Reset() { + *x = CreateTimeSeriesError{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesError) ProtoMessage() {} + +func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11} +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +// Summary of the result of a failed request to write data to a time series. +type CreateTimeSeriesSummary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of points in the request. + TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"` + // The number of points that were successfully written. + SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"` + // The number of points that failed to be written. Order is not guaranteed. 
+ Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *CreateTimeSeriesSummary) Reset() { + *x = CreateTimeSeriesSummary{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesSummary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesSummary) ProtoMessage() {} + +func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 { + if x != nil { + return x.TotalPointCount + } + return 0 +} + +func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 { + if x != nil { + return x.SuccessPointCount + } + return 0 +} + +func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error { + if x != nil { + return x.Errors + } + return nil +} + +// The `QueryTimeSeries` request. +type QueryTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The query in the [Monitoring Query + // Language](https://cloud.google.com/monitoring/mql/reference) format. + // The default time zone is in UTC. + Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"` + // A positive number that is the maximum number of time_series_data to return. + PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *QueryTimeSeriesRequest) Reset() { + *x = QueryTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryTimeSeriesRequest) ProtoMessage() {} + +func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead. +func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13} +} + +func (x *QueryTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *QueryTimeSeriesRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *QueryTimeSeriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *QueryTimeSeriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `QueryTimeSeries` response. +type QueryTimeSeriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The descriptor for the time series data. + TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"` + // The time series data. + TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. The available data will be available in the + // response. 
+ PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"` +} + +func (x *QueryTimeSeriesResponse) Reset() { + *x = QueryTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryTimeSeriesResponse) ProtoMessage() {} + +func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14} +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor { + if x != nil { + return x.TimeSeriesDescriptor + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData { + if x != nil { + return x.TimeSeriesData + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status { + if x != nil { + return x.PartialErrors + } + return nil +} + +// This is an error detail intended to be used with INVALID_ARGUMENT errors. +type QueryErrorList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Errors in parsing the time series query language text. The number of errors + // in the response may be limited. + Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + // A summary of all the errors. + ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"` +} + +func (x *QueryErrorList) Reset() { + *x = QueryErrorList{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryErrorList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryErrorList) ProtoMessage() {} + +func (x *QueryErrorList) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead. +func (*QueryErrorList) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15} +} + +func (x *QueryErrorList) GetErrors() []*QueryError { + if x != nil { + return x.Errors + } + return nil +} + +func (x *QueryErrorList) GetErrorSummary() string { + if x != nil { + return x.ErrorSummary + } + return "" +} + +// Detailed information about an error category. 
+type CreateTimeSeriesSummary_Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of the requested write operation. + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The number of points that couldn't be written because of `status`. + PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` +} + +func (x *CreateTimeSeriesSummary_Error) Reset() { + *x = CreateTimeSeriesSummary_Error{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesSummary_Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesSummary_Error) ProtoMessage() {} + +func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 { + if x != nil { + return x.PointCount + } + return 0 +} + +var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0, + 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x37, + 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, + 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xba, + 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1d, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, + 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, + 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 
0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xad, 0x04, 0x0a, 0x15, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x12, 0x24, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x0b, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, + 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x14, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, + 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01, 0x22, 0xd6, 0x01, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x6e, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x88, 0x01, + 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xae, 0x02, 0x0a, 0x17, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 
0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, + 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbc, 0x0f, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, 0x01, 0x0a, + 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, + 0x32, 
0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, + 0x2a, 0x7d, 0x12, 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, + 0x12, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x5b, + 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, + 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x16, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xfe, + 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x01, 0xda, + 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x2c, 0x76, 0x69, 0x65, 0x77, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x6e, 0x5a, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 
0x73, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x99, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0xda, 0x41, 0x10, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0xae, 0x01, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, + 0xda, 0x41, 0x10, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x01, 0x2a, 0x22, 0x2e, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xda, 0x01, 0xca, + 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xba, 0x01, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, 0x70, 
0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x89, 0x08, 0xea, 0x41, 0xf0, 0x01, + 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, + 0xea, 0x41, 0xb7, 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, + 0x64, 
0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0x51, 0x0a, 0x23, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x12, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0xea, + 0x41, 0xb5, 0x01, 0x0a, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x35, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, + 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x29, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x42, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, + 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc +) + +func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_service_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_service_proto_rawDescData +} + +var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_metric_service_proto_goTypes = []any{ + (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest + (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest + (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse + (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest + (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest + (*DeleteMetricDescriptorRequest)(nil), // 8: google.monitoring.v3.DeleteMetricDescriptorRequest + (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest + (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse + (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest + (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError + (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary + (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse + (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList + (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error + (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor + (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor + (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation + (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries + (*status.Status)(nil), // 23: google.rpc.Status + (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData + (*QueryError)(nil), // 26: google.monitoring.v3.QueryError + (*emptypb.Empty)(nil), // 27: google.protobuf.Empty +} +var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{ + 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor + 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor + 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor + 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation + 21, // 5: 
google.monitoring.v3.ListTimeSeriesRequest.secondary_aggregation:type_name -> google.monitoring.v3.Aggregation + 0, // 6: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + 22, // 7: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 8: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status + 22, // 9: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries + 22, // 10: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 11: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status + 17, // 12: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error + 24, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor + 25, // 14: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData + 23, // 15: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status + 26, // 16: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError + 23, // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status + 1, // 18: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + 3, // 19: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest + 4, // 20: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest + 6, // 21: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest + 7, // 22: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest + 8, // 23: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest + 9, // 24: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest + 11, // 25: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 11, // 26: google.monitoring.v3.MetricService.CreateServiceTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 2, // 27: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + 18, // 28: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor + 5, // 29: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse + 19, // 30: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor + 19, // 31: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor + 27, // 32: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty + 10, // 33: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> 
google.monitoring.v3.ListTimeSeriesResponse + 27, // 34: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty + 27, // 35: google.monitoring.v3.MetricService.CreateServiceTimeSeries:output_type -> google.protobuf.Empty + 27, // [27:36] is the sub-list for method output_type + 18, // [18:27] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_service_proto_init() } +func file_google_monitoring_v3_metric_service_proto_init() { + if File_google_monitoring_v3_metric_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_metric_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetMonitoredResourceDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GetMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DeleteMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*QueryErrorList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary_Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_service_proto = out.File + file_google_monitoring_v3_metric_service_proto_rawDesc = nil + file_google_monitoring_v3_metric_service_proto_goTypes = nil + file_google_monitoring_v3_metric_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MetricServiceClient is the client API for MetricService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. 
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type metricServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. 
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) +} + +// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricServiceServer struct { +} + +func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateServiceTimeSeries not implemented") +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateServiceTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + { + MethodName: "CreateServiceTimeSeries", + Handler: _MetricService_CreateServiceTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go new file mode 100644 index 000000000..643b244e4 --- /dev/null +++ 
b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -0,0 +1,192 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/mutation_record.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes a change made to a configuration. +type MutationRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the change occurred. + MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. + MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` +} + +func (x *MutationRecord) Reset() { + *x = MutationRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MutationRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MutationRecord) ProtoMessage() {} + +func (x *MutationRecord) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead. 
+func (*MutationRecord) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0} +} + +func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp { + if x != nil { + return x.MutateTime + } + return nil +} + +func (x *MutationRecord) GetMutatedBy() string { + if x != nil { + return x.MutatedBy + } + return "" +} + +var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13, + 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once + file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc +) + +func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() { + 
file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData) + }) + return file_google_monitoring_v3_mutation_record_proto_rawDescData +} + +var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{ + (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_mutation_record_proto_init() } +func file_google_monitoring_v3_mutation_record_proto_init() { + if File_google_monitoring_v3_mutation_record_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*MutationRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes, + }.Build() + File_google_monitoring_v3_mutation_record_proto = out.File + file_google_monitoring_v3_mutation_record_proto_rawDesc = nil + file_google_monitoring_v3_mutation_record_proto_goTypes = nil + file_google_monitoring_v3_mutation_record_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go new file mode 100644 index 000000000..603b5bcdd --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -0,0 +1,647 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + _ "google.golang.org/genproto/googleapis/api/annotations" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +// Enum value maps for NotificationChannel_VerificationStatus. +var ( + NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", + } + NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, + } +) + +func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus { + p := new(NotificationChannel_VerificationStatus) + *p = x + return p +} + +func (x NotificationChannel_VerificationStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor() +} + +func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_notification_proto_enumTypes[0] +} + +func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead. 
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full REST resource name for this descriptor. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email" and "sms". To view the + // full list of channels, see + // [Channel + // descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd). + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` + // The product launch stage for channels of this type. + LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *NotificationChannelDescriptor) Reset() { + *x = NotificationChannelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannelDescriptor) ProtoMessage() {} + +func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead. 
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0} +} + +func (x *NotificationChannelDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannelDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. +func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if x != nil { + return x.SupportedTiers + } + return nil +} + +func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage(0) +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the notification channel. This field matches the + // value of the + // [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] + // field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceed 1024 Unicode characters. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] + // of the `NotificationChannelDescriptor` corresponding to the `type` field. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated. + // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. + // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Record of the creation of this channel. + CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // Records of the modification of this channel. 
+ MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"` +} + +func (x *NotificationChannel) Reset() { + *x = NotificationChannel{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannel) ProtoMessage() {} + +func (x *NotificationChannel) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead. +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1} +} + +func (x *NotificationChannel) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannel) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannel) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannel) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannel) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *NotificationChannel) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { + if x != nil { + return x.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *NotificationChannel) GetCreationRecord() *MutationRecord { + if x != nil { + return x.CreationRecord + } + return nil +} + +func (x *NotificationChannel) GetMutationRecords() []*MutationRecord { + if x != nil { + return x.MutationRecords + } + return nil +} + +var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 
0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65, 0x72, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, + 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, + 0x12, 0x01, 0x2a, 0x22, 0xc6, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, + 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, + 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, + 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, + 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc +) + +func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_proto_rawDescData +} + +var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_monitoring_v3_notification_proto_goTypes = []any{ + (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus + (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel + nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry + nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue + (*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord +} +var file_google_monitoring_v3_notification_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: 
google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier + 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry + 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry + 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus + 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue + 9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord + 9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_notification_proto_init() } +func file_google_monitoring_v3_notification_proto_init() { + if File_google_monitoring_v3_notification_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_notification_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_proto = out.File + file_google_monitoring_v3_notification_proto_rawDesc = nil + file_google_monitoring_v3_notification_proto_goTypes = nil + file_google_monitoring_v3_notification_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go new file mode 100644 index 000000000..ac7bafd1f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -0,0 +1,2002 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this + // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent + // container in which to look for the descriptors; to retrieve a single + // descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsRequest) Reset() { + *x = ListNotificationChannelDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListNotificationChannelDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsResponse) Reset() { + *x = ListNotificationChannelDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead. 
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if x != nil { + return x.ChannelDescriptors + } + return nil +} + +func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetNotificationChannelDescriptor` response. +type GetNotificationChannelDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel type for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelDescriptorRequest) Reset() { + *x = GetNotificationChannelDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} + +func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetNotificationChannelDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container into which the channel will be + // written, this does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The definition of the `NotificationChannel` to create. 
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *CreateNotificationChannelRequest) Reset() { + *x = CreateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationChannelRequest) ProtoMessage() {} + +func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelsRequest) Reset() { + *x = ListNotificationChannelsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListNotificationChannelsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of notification channels in all pages. This number is only + // an estimate, and may change in subsequent pages. 
https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListNotificationChannelsResponse) Reset() { + *x = ListNotificationChannelsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5} +} + +func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if x != nil { + return x.NotificationChannels + } + return nil +} + +func (x *ListNotificationChannelsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListNotificationChannelsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelRequest) Reset() { + *x = GetNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelRequest) ProtoMessage() {} + +func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6} +} + +func (x *GetNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fields to update. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *UpdateNotificationChannelRequest) Reset() { + *x = UpdateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateNotificationChannelRequest) ProtoMessage() {} + +func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *DeleteNotificationChannelRequest) Reset() { + *x = DeleteNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationChannelRequest) ProtoMessage() {} + +func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteNotificationChannelRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to which to send a verification code. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *SendNotificationChannelVerificationCodeRequest) Reset() { + *x = SendNotificationChannelVerificationCodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendNotificationChannelVerificationCodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} + +func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9} +} + +func (x *SendNotificationChannelVerificationCodeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel for which a verification code is to be + // generated and retrieved. This must name a channel that is already verified; + // if the specified channel is not verified, the request will fail. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` +} + +func (x *GetNotificationChannelVerificationCodeRequest) Reset() { + *x = GetNotificationChannelVerificationCodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelVerificationCodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} + +func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10} +} + +func (x *GetNotificationChannelVerificationCodeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification code, which may be used to verify other channels + // that have an equivalent identity (i.e. other channels of the same + // type with the same fingerprint such as other email channels with + // the same email address or other sms channels with the same number). + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // The expiration time associated with the code that was returned. If + // an expiration was provided in the request, this is the minimum of the + // requested expiration in the request and the max permitted expiration. 
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` +} + +func (x *GetNotificationChannelVerificationCodeResponse) Reset() { + *x = GetNotificationChannelVerificationCodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelVerificationCodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} + +func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). + Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *VerifyNotificationChannelRequest) Reset() { + *x = VerifyNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyNotificationChannelRequest) ProtoMessage() {} + +func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead. 
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12} +} + +func (x *VerifyNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *VerifyNotificationChannelRequest) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, + 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x7e, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, + 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0xd0, 0x01, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x22, 0xdb, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 
0x65, 0x6c, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xc9, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, + 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x6a, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x20, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, + 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, + 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, + 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, + 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xdd, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc4, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, + 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0xb5, 0x01, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 
0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64, 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, + 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x83, 0x02, 0x0a, 0x19, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0xda, + 0x41, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x32, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, + 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 
0x41, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xdc, 0x01, 0x0a, 0x27, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a, 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0xda, 0x41, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x64, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, + 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x72, 0x65, 0x61, 0x64, 0x42, 0xd3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + 
file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc +) + +func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_service_proto_rawDescData +} + +var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_monitoring_v3_notification_service_proto_goTypes = []any{ + (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest + (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse + (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest + (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest + (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest + (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse + (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest + (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest + (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest + (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest + (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel + (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty +} +var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{ + 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor + 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel + 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp + 16, 
// 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp + 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest + 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest + 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest + 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest + 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest + 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest + 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest + 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest + 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse + 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor + 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse + 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty + 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty + 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // [17:27] is the sub-list for method output_type + 7, // [7:17] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_notification_service_proto_init() } +func file_google_monitoring_v3_notification_service_proto_init() { + if File_google_monitoring_v3_notification_service_proto != nil { + return + } + file_google_monitoring_v3_notification_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UpdateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*DeleteNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*VerifyNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_service_proto = out.File + file_google_monitoring_v3_notification_service_proto_rawDesc = nil + file_google_monitoring_v3_notification_service_proto_goTypes = nil + file_google_monitoring_v3_notification_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. 
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. 
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient { + return &notificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. 
+ // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedNotificationChannelServiceServer struct { +} + +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented") +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go new file mode 100644 index 000000000..e9bfbd68f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -0,0 +1,212 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/query_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x32, 0xde, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 
0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_monitoring_v3_query_service_proto_goTypes = []any{ + (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse +} +var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{ + 0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest + 1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_query_service_proto_init() } +func file_google_monitoring_v3_query_service_proto_init() { + if File_google_monitoring_v3_query_service_proto != nil { + return + } + file_google_monitoring_v3_metric_service_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_query_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs, + }.Build() + File_google_monitoring_v3_query_service_proto = out.File + file_google_monitoring_v3_query_service_proto_rawDesc = nil + file_google_monitoring_v3_query_service_proto_goTypes = nil + 
file_google_monitoring_v3_query_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// QueryServiceClient is the client API for QueryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryServiceClient interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) +} + +type queryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient { + return &queryServiceClient{cc} +} + +func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) { + out := new(QueryTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServiceServer is the server API for QueryService service. +type QueryServiceServer interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) +} + +// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServiceServer struct { +} + +func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented") +} + +func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) { + s.RegisterService(&_QueryService_serviceDesc, srv) +} + +func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.QueryService", + HandlerType: (*QueryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryTimeSeries", + Handler: _QueryService_QueryTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/query_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go new file mode 100644 index 000000000..869a3738c --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -0,0 +1,3107 @@ +// Copyright 2024 Google LLC +// 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `ServiceLevelObjective.View` determines what form of +// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`, +// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs. +type ServiceLevelObjective_View int32 + +const ( + // Same as FULL. + ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0 + // Return the embedded `ServiceLevelIndicator` in the form in which it was + // defined. If it was defined using a `BasicSli`, return that `BasicSli`. + ServiceLevelObjective_FULL ServiceLevelObjective_View = 2 + // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead + // return the `ServiceLevelIndicator` with its mode of computation fully + // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using + // `RequestBasedSli` or `WindowsBasedSli`, return the + // `ServiceLevelIndicator` as it was provided. + ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1 +) + +// Enum value maps for ServiceLevelObjective_View. +var ( + ServiceLevelObjective_View_name = map[int32]string{ + 0: "VIEW_UNSPECIFIED", + 2: "FULL", + 1: "EXPLICIT", + } + ServiceLevelObjective_View_value = map[string]int32{ + "VIEW_UNSPECIFIED": 0, + "FULL": 2, + "EXPLICIT": 1, + } +) + +func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View { + p := new(ServiceLevelObjective_View) + *p = x + return p +} + +func (x ServiceLevelObjective_View) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor() +} + +func (ServiceLevelObjective_View) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_service_proto_enumTypes[0] +} + +func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceLevelObjective_View.Descriptor instead. 
+func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0} +} + +// A `Service` is a discrete, autonomous, and network-accessible unit, designed +// to solve an individual concern +// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In +// Cloud Monitoring, a `Service` acts as the root resource under which +// operational aspects of the service are accessible. +type Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this Service. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this Service. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // REQUIRED. Service-identifying atoms specifying the underlying service. + // + // Types that are assignable to Identifier: + // + // *Service_Custom_ + // *Service_AppEngine_ + // *Service_CloudEndpoints_ + // *Service_ClusterIstio_ + // *Service_MeshIstio_ + // *Service_IstioCanonicalService_ + // *Service_CloudRun_ + // *Service_GkeNamespace_ + // *Service_GkeWorkload_ + // *Service_GkeService_ + Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Message that contains the service type and service labels of this service + // if it is a basic service. + // Documentation and examples + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"` + // Configuration for how to query telemetry on a Service. + Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + // Labels which have been used to annotate the service. Label keys must start + // with a letter. Label keys and values may contain lowercase letters, + // numbers, underscores, and dashes. Label keys and values have a maximum + // length of 63 characters, and must be less than 128 bytes in size. Up to 64 + // label entries may be stored. For labels which do not have a semantic value, + // the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,14,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service) Reset() { + *x = Service{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. 
+func (*Service) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0} +} + +func (x *Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Service) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *Service) GetIdentifier() isService_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (x *Service) GetCustom() *Service_Custom { + if x, ok := x.GetIdentifier().(*Service_Custom_); ok { + return x.Custom + } + return nil +} + +func (x *Service) GetAppEngine() *Service_AppEngine { + if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok { + return x.AppEngine + } + return nil +} + +func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints { + if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok { + return x.CloudEndpoints + } + return nil +} + +func (x *Service) GetClusterIstio() *Service_ClusterIstio { + if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok { + return x.ClusterIstio + } + return nil +} + +func (x *Service) GetMeshIstio() *Service_MeshIstio { + if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok { + return x.MeshIstio + } + return nil +} + +func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService { + if x, ok := x.GetIdentifier().(*Service_IstioCanonicalService_); ok { + return x.IstioCanonicalService + } + return nil +} + +func (x *Service) GetCloudRun() *Service_CloudRun { + if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok { + return x.CloudRun + } + return nil +} + +func (x *Service) GetGkeNamespace() *Service_GkeNamespace { + if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok { + return x.GkeNamespace + } + return nil +} + +func (x *Service) GetGkeWorkload() *Service_GkeWorkload { + if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok { + return x.GkeWorkload + } + return nil +} + +func (x *Service) GetGkeService() *Service_GkeService { + if x, ok := x.GetIdentifier().(*Service_GkeService_); ok { + return x.GkeService + } + return nil +} + +func (x *Service) GetBasicService() *Service_BasicService { + if x != nil { + return x.BasicService + } + return nil +} + +func (x *Service) GetTelemetry() *Service_Telemetry { + if x != nil { + return x.Telemetry + } + return nil +} + +func (x *Service) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isService_Identifier interface { + isService_Identifier() +} + +type Service_Custom_ struct { + // Custom service type. + Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"` +} + +type Service_AppEngine_ struct { + // Type used for App Engine services. + AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"` +} + +type Service_CloudEndpoints_ struct { + // Type used for Cloud Endpoints services. + CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"` +} + +type Service_ClusterIstio_ struct { + // Type used for Istio services that live in a Kubernetes cluster. + ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"` +} + +type Service_MeshIstio_ struct { + // Type used for Istio services scoped to an Istio mesh. 
+ MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"` +} + +type Service_IstioCanonicalService_ struct { + // Type used for canonical services scoped to an Istio mesh. + // Metrics for Istio are + // [documented here](https://istio.io/latest/docs/reference/config/metrics/) + IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"` +} + +type Service_CloudRun_ struct { + // Type used for Cloud Run services. + CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"` +} + +type Service_GkeNamespace_ struct { + // Type used for GKE Namespaces. + GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"` +} + +type Service_GkeWorkload_ struct { + // Type used for GKE Workloads. + GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"` +} + +type Service_GkeService_ struct { + // Type used for GKE Services (the Kubernetes concept of a service). + GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"` +} + +func (*Service_Custom_) isService_Identifier() {} + +func (*Service_AppEngine_) isService_Identifier() {} + +func (*Service_CloudEndpoints_) isService_Identifier() {} + +func (*Service_ClusterIstio_) isService_Identifier() {} + +func (*Service_MeshIstio_) isService_Identifier() {} + +func (*Service_IstioCanonicalService_) isService_Identifier() {} + +func (*Service_CloudRun_) isService_Identifier() {} + +func (*Service_GkeNamespace_) isService_Identifier() {} + +func (*Service_GkeWorkload_) isService_Identifier() {} + +func (*Service_GkeService_) isService_Identifier() {} + +// A Service-Level Objective (SLO) describes a level of desired good service. It +// consists of a service-level indicator (SLI), a performance goal, and a period +// over which the objective is to be evaluated against that goal. The SLO can +// use SLIs defined in a number of different manners. Typical SLOs might include +// "99% of requests in each rolling week have latency below 200 milliseconds" or +// "99.5% of requests in each calendar month return successfully." +type ServiceLevelObjective struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this `ServiceLevelObjective`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this SLO. + DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The definition of good service, used to measure and calculate the quality + // of the `Service`'s performance with respect to a single aspect of service + // quality. + ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"` + // The fraction of service that must be good in order for this objective to be + // met. `0 < goal <= 0.999`. + Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"` + // The time period over which the objective will be evaluated. 
+ // + // Types that are assignable to Period: + // + // *ServiceLevelObjective_RollingPeriod + // *ServiceLevelObjective_CalendarPeriod + Period isServiceLevelObjective_Period `protobuf_oneof:"period"` + // Labels which have been used to annotate the service-level objective. Label + // keys must start with a letter. Label keys and values may contain lowercase + // letters, numbers, underscores, and dashes. Label keys and values have a + // maximum length of 63 characters, and must be less than 128 bytes in size. + // Up to 64 label entries may be stored. For labels which do not have a + // semantic value, the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,12,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ServiceLevelObjective) Reset() { + *x = ServiceLevelObjective{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelObjective) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelObjective) ProtoMessage() {} + +func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead. +func (*ServiceLevelObjective) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ServiceLevelObjective) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceLevelObjective) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator { + if x != nil { + return x.ServiceLevelIndicator + } + return nil +} + +func (x *ServiceLevelObjective) GetGoal() float64 { + if x != nil { + return x.Goal + } + return 0 +} + +func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { + if m != nil { + return m.Period + } + return nil +} + +func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { + return x.RollingPeriod + } + return nil +} + +func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok { + return x.CalendarPeriod + } + return calendarperiod.CalendarPeriod(0) +} + +func (x *ServiceLevelObjective) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isServiceLevelObjective_Period interface { + isServiceLevelObjective_Period() +} + +type ServiceLevelObjective_RollingPeriod struct { + // A rolling time period, semantically "in the past ``". + // Must be an integer multiple of 1 day no larger than 30 days. + RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` +} + +type ServiceLevelObjective_CalendarPeriod struct { + // A calendar period, semantically "since the start of the current + // ``". 
At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and + // `MONTH` are supported. + CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"` +} + +func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {} + +func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {} + +// A Service-Level Indicator (SLI) describes the "performance" of a service. For +// some services, the SLI is well-defined. In such cases, the SLI can be +// described easily by referencing the well-known SLI and providing the needed +// parameters. Alternatively, a "custom" SLI can be defined with a query to the +// underlying metric store. An SLI is defined to be `good_service / +// total_service` over any queried time interval. The value of performance +// always falls into the range `0 <= performance <= 1`. A custom SLI describes +// how to compute this ratio, whether this is by dividing values from a pair of +// time series, cutting a `Distribution` into good and bad counts, or counting +// time windows in which the service complies with a criterion. For separation +// of concerns, a single Service-Level Indicator measures performance for only +// one aspect of service quality, such as fraction of successful queries or +// fast-enough queries. +type ServiceLevelIndicator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Service level indicators can be grouped by whether the "unit" of service + // being measured is based on counts of good requests or on counts of good + // time windows + // + // Types that are assignable to Type: + // + // *ServiceLevelIndicator_BasicSli + // *ServiceLevelIndicator_RequestBased + // *ServiceLevelIndicator_WindowsBased + Type isServiceLevelIndicator_Type `protobuf_oneof:"type"` +} + +func (x *ServiceLevelIndicator) Reset() { + *x = ServiceLevelIndicator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelIndicator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelIndicator) ProtoMessage() {} + +func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead. 
+func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2} +} + +func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok { + return x.BasicSli + } + return nil +} + +func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok { + return x.RequestBased + } + return nil +} + +func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok { + return x.WindowsBased + } + return nil +} + +type isServiceLevelIndicator_Type interface { + isServiceLevelIndicator_Type() +} + +type ServiceLevelIndicator_BasicSli struct { + // Basic SLI on a well-known service type. + BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"` +} + +type ServiceLevelIndicator_RequestBased struct { + // Request-based SLIs + RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"` +} + +type ServiceLevelIndicator_WindowsBased struct { + // Windows-based SLIs + WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"` +} + +func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {} + +// An SLI measuring performance on a well-known service type. Performance will +// be computed on the basis of pre-defined metrics. The type of the +// `service_resource` determines the metrics to use and the +// `service_resource.labels` and `metric_labels` are used to construct a +// monitoring filter to filter that metric down to just the data relevant to +// this service. +type BasicSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from + // other methods will not be used to calculate performance for this SLI. If + // omitted, this SLI applies to all the Service's methods. For service types + // that don't support breaking down by method, setting this field will result + // in an error. + Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"` + // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry + // from other locations will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all locations in which the Service has + // activity. For service types that don't support breaking down by location, + // setting this field will result in an error. + Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"` + // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry + // from other API versions will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all API versions. For service types + // that don't support breaking down by version, setting this field will result + // in an error. 
+ Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"` + // This SLI can be evaluated on the basis of availability or latency. + // + // Types that are assignable to SliCriteria: + // + // *BasicSli_Availability + // *BasicSli_Latency + SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` +} + +func (x *BasicSli) Reset() { + *x = BasicSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli) ProtoMessage() {} + +func (x *BasicSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead. +func (*BasicSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3} +} + +func (x *BasicSli) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *BasicSli) GetLocation() []string { + if x != nil { + return x.Location + } + return nil +} + +func (x *BasicSli) GetVersion() []string { + if x != nil { + return x.Version + } + return nil +} + +func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria { + if m != nil { + return m.SliCriteria + } + return nil +} + +func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok { + return x.Availability + } + return nil +} + +func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok { + return x.Latency + } + return nil +} + +type isBasicSli_SliCriteria interface { + isBasicSli_SliCriteria() +} + +type BasicSli_Availability struct { + // Good service is defined to be the count of requests made to this service + // that return successfully. + Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"` +} + +type BasicSli_Latency struct { + // Good service is defined to be the count of requests made to this service + // that are fast enough with respect to `latency.threshold`. + Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"` +} + +func (*BasicSli_Availability) isBasicSli_SliCriteria() {} + +func (*BasicSli_Latency) isBasicSli_SliCriteria() {} + +// Range of numerical values within `min` and `max`. +type Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Range minimum. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // Range maximum. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Range) Reset() { + *x = Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Range) ProtoMessage() {} + +func (x *Range) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Range.ProtoReflect.Descriptor instead. +func (*Range) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4} +} + +func (x *Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// Service Level Indicators for which atomic units of service are counted +// directly. +type RequestBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means to compute a ratio of `good_service` to `total_service`. + // + // Types that are assignable to Method: + // + // *RequestBasedSli_GoodTotalRatio + // *RequestBasedSli_DistributionCut + Method isRequestBasedSli_Method `protobuf_oneof:"method"` +} + +func (x *RequestBasedSli) Reset() { + *x = RequestBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBasedSli) ProtoMessage() {} + +func (x *RequestBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead. +func (*RequestBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5} +} + +func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method { + if m != nil { + return m.Method + } + return nil +} + +func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio { + if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok { + return x.GoodTotalRatio + } + return nil +} + +func (x *RequestBasedSli) GetDistributionCut() *DistributionCut { + if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok { + return x.DistributionCut + } + return nil +} + +type isRequestBasedSli_Method interface { + isRequestBasedSli_Method() +} + +type RequestBasedSli_GoodTotalRatio struct { + // `good_total_ratio` is used when the ratio of `good_service` to + // `total_service` is computed from two `TimeSeries`. 
+ GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"` +} + +type RequestBasedSli_DistributionCut struct { + // `distribution_cut` is used when `good_service` is a count of values + // aggregated in a `Distribution` that fall into a good range. The + // `total_service` is the total count of all values aggregated in the + // `Distribution`. + DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"` +} + +func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {} + +func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {} + +// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the +// `good_service / total_service` ratio. The specified `TimeSeries` must have +// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = +// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify +// exactly two of good, bad, and total, and the relationship `good_service + +// bad_service = total_service` will be assumed. +type TimeSeriesRatio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying good service provided. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying bad service, either demanded service + // that was not provided or demanded service that was of inadequate quality. + // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have + // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying total demanded service. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"` +} + +func (x *TimeSeriesRatio) Reset() { + *x = TimeSeriesRatio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesRatio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesRatio) ProtoMessage() {} + +func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesRatio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6} +} + +func (x *TimeSeriesRatio) GetGoodServiceFilter() string { + if x != nil { + return x.GoodServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetBadServiceFilter() string { + if x != nil { + return x.BadServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetTotalServiceFilter() string { + if x != nil { + return x.TotalServiceFilter + } + return "" +} + +// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring +// good service and total service. The `TimeSeries` must have `ValueType = +// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The +// computed `good_service` will be the estimated count of values in the +// `Distribution` that fall within the specified `min` and `max`. +type DistributionCut struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` aggregating values. Must have `ValueType = + // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound to + // an infinite value. + Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *DistributionCut) Reset() { + *x = DistributionCut{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributionCut) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributionCut) ProtoMessage() {} + +func (x *DistributionCut) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead. +func (*DistributionCut) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7} +} + +func (x *DistributionCut) GetDistributionFilter() string { + if x != nil { + return x.DistributionFilter + } + return "" +} + +func (x *DistributionCut) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +// A `WindowsBasedSli` defines `good_service` as the count of time windows for +// which the provided service was of good quality. Criteria for determining +// if service was good are embedded in the `window_criterion`. +type WindowsBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The criterion to use for evaluating window goodness. + // + // Types that are assignable to WindowCriterion: + // + // *WindowsBasedSli_GoodBadMetricFilter + // *WindowsBasedSli_GoodTotalRatioThreshold + // *WindowsBasedSli_MetricMeanInRange + // *WindowsBasedSli_MetricSumInRange + WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` + // Duration over which window quality is evaluated. 
Must be an integer + // fraction of a day and at least `60s`. + WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` +} + +func (x *WindowsBasedSli) Reset() { + *x = WindowsBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli) ProtoMessage() {} + +func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8} +} + +func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion { + if m != nil { + return m.WindowCriterion + } + return nil +} + +func (x *WindowsBasedSli) GetGoodBadMetricFilter() string { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok { + return x.GoodBadMetricFilter + } + return "" +} + +func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok { + return x.GoodTotalRatioThreshold + } + return nil +} + +func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok { + return x.MetricMeanInRange + } + return nil +} + +func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok { + return x.MetricSumInRange + } + return nil +} + +func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration { + if x != nil { + return x.WindowPeriod + } + return nil +} + +type isWindowsBasedSli_WindowCriterion interface { + isWindowsBasedSli_WindowCriterion() +} + +type WindowsBasedSli_GoodBadMetricFilter struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if + // any `true` values appear in the window. + GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"` +} + +type WindowsBasedSli_GoodTotalRatioThreshold struct { + // A window is good if its `performance` is high enough. + GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"` +} + +type WindowsBasedSli_MetricMeanInRange struct { + // A window is good if the metric's value is in a good range, averaged + // across returned streams. + MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"` +} + +type WindowsBasedSli_MetricSumInRange struct { + // A window is good if the metric's value is in a good range, summed across + // returned streams. 
+ MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"` +} + +func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} + +// Use a custom service to designate a service that you want to monitor +// when none of the other service types (like App Engine, Cloud Run, or +// a GKE type) matches your intended service. +type Service_Custom struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Service_Custom) Reset() { + *x = Service_Custom{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Custom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Custom) ProtoMessage() {} + +func (x *Service_Custom) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead. +func (*Service_Custom) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0} +} + +// App Engine service. Learn more at https://cloud.google.com/appengine. +type Service_AppEngine struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the App Engine module underlying this service. Corresponds to + // the `module_id` resource label in the [`gae_app` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app). + ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` +} + +func (x *Service_AppEngine) Reset() { + *x = Service_AppEngine{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_AppEngine) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_AppEngine) ProtoMessage() {} + +func (x *Service_AppEngine) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead. +func (*Service_AppEngine) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Service_AppEngine) GetModuleId() string { + if x != nil { + return x.ModuleId + } + return "" +} + +// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. 
+type Service_CloudEndpoints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Endpoints service underlying this service. + // Corresponds to the `service` resource label in the [`api` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_api). + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *Service_CloudEndpoints) Reset() { + *x = Service_CloudEndpoints{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudEndpoints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudEndpoints) ProtoMessage() {} + +func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead. +func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Service_CloudEndpoints) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +// Istio service scoped to a single Kubernetes cluster. Learn more at +// https://istio.io. Clusters running OSS Istio will have their services +// ingested as this type. +type Service_ClusterIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `location` resource label in `k8s_cluster` + // resources. + Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` + // The name of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `cluster_name` resource label in + // `k8s_cluster` resources. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. 
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_ClusterIstio) Reset() { + *x = Service_ClusterIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_ClusterIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_ClusterIstio) ProtoMessage() {} + +func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead. +func (*Service_ClusterIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *Service_ClusterIstio) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_ClusterIstio) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8 +// will have their services ingested as this type. +type Service_MeshIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the mesh in which this Istio service is defined. + // Corresponds to the `mesh_uid` metric label in Istio metrics. + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. + ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_MeshIstio) Reset() { + *x = Service_MeshIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_MeshIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_MeshIstio) ProtoMessage() {} + +func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead. 
+func (*Service_MeshIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *Service_MeshIstio) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_MeshIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_MeshIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Canonical service scoped to an Istio mesh. Anthos clusters running ASM >= +// 1.6.8 will have their services ingested as this type. +type Service_IstioCanonicalService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the Istio mesh in which this canonical service is defined. + // Corresponds to the `mesh_uid` metric label in + // [Istio metrics](https://cloud.google.com/monitoring/api/metrics_istio). + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_namespace` metric + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalServiceNamespace string `protobuf:"bytes,3,opt,name=canonical_service_namespace,json=canonicalServiceNamespace,proto3" json:"canonical_service_namespace,omitempty"` + // The name of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_name` metric label in + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalService string `protobuf:"bytes,4,opt,name=canonical_service,json=canonicalService,proto3" json:"canonical_service,omitempty"` +} + +func (x *Service_IstioCanonicalService) Reset() { + *x = Service_IstioCanonicalService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_IstioCanonicalService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_IstioCanonicalService) ProtoMessage() {} + +func (x *Service_IstioCanonicalService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_IstioCanonicalService.ProtoReflect.Descriptor instead. +func (*Service_IstioCanonicalService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *Service_IstioCanonicalService) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalServiceNamespace() string { + if x != nil { + return x.CanonicalServiceNamespace + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalService() string { + if x != nil { + return x.CanonicalService + } + return "" +} + +// Cloud Run service. Learn more at https://cloud.google.com/run. +type Service_CloudRun struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Run service. 
Corresponds to the `service_name` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The location the service is run. Corresponds to the `location` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Service_CloudRun) Reset() { + *x = Service_CloudRun{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudRun) ProtoMessage() {} + +func (x *Service_CloudRun) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead. +func (*Service_CloudRun) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *Service_CloudRun) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *Service_CloudRun) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +// GKE Namespace. The field names correspond to the resource metadata labels +// on monitored resources that fall under a namespace (for example, +// `k8s_container` or `k8s_pod`). +type Service_GkeNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of this namespace. 
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` +} + +func (x *Service_GkeNamespace) Reset() { + *x = Service_GkeNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeNamespace) ProtoMessage() {} + +func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead. +func (*Service_GkeNamespace) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *Service_GkeNamespace) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeNamespace) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeNamespace) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeNamespace) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond +// to the metadata labels on monitored resources that fall under a workload +// (for example, `k8s_container` or `k8s_pod`). +type Service_GkeWorkload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The type of this workload (for example, "Deployment" or "DaemonSet") + TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"` + // The name of this workload. 
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"` +} + +func (x *Service_GkeWorkload) Reset() { + *x = Service_GkeWorkload{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeWorkload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeWorkload) ProtoMessage() {} + +func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead. +func (*Service_GkeWorkload) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Service_GkeWorkload) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeWorkload) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeWorkload) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeWorkload) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerType() string { + if x != nil { + return x.TopLevelControllerType + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerName() string { + if x != nil { + return x.TopLevelControllerName + } + return "" +} + +// GKE Service. The "service" here represents a +// [Kubernetes service +// object](https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on [`k8s_service` +// monitored +// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type Service_GkeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The name of this service. 
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_GkeService) Reset() { + *x = Service_GkeService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeService) ProtoMessage() {} + +func (x *Service_GkeService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead. +func (*Service_GkeService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Service_GkeService) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeService) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeService) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeService) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeService) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// A well-known service type, defined by its service type and service labels. +// Documentation and examples +// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). +type Service_BasicService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of service that this basic service defines, e.g. + // APP_ENGINE service type. + // Documentation and valid values + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Labels that specify the resource that emits the monitoring data which + // is used for SLO reporting of this `Service`. + // Documentation and valid values for given service types + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service_BasicService) Reset() { + *x = Service_BasicService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_BasicService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_BasicService) ProtoMessage() {} + +func (x *Service_BasicService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead. +func (*Service_BasicService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Service_BasicService) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *Service_BasicService) GetServiceLabels() map[string]string { + if x != nil { + return x.ServiceLabels + } + return nil +} + +// Configuration for how to query telemetry on a Service. +type Service_Telemetry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full name of the resource that defines this service. Formatted as + // described in https://cloud.google.com/apis/design/resource_names. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` +} + +func (x *Service_Telemetry) Reset() { + *x = Service_Telemetry{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Telemetry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Telemetry) ProtoMessage() {} + +func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. +func (*Service_Telemetry) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *Service_Telemetry) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +// Future parameters for the availability SLI. 
+type BasicSli_AvailabilityCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BasicSli_AvailabilityCriteria) Reset() { + *x = BasicSli_AvailabilityCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_AvailabilityCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} + +func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0} +} + +// Parameters for a latency threshold SLI. +type BasicSli_LatencyCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Good service is defined to be the count of requests made to this service + // that return in no more than `threshold`. + Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *BasicSli_LatencyCriteria) Reset() { + *x = BasicSli_LatencyCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_LatencyCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_LatencyCriteria) ProtoMessage() {} + +func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration { + if x != nil { + return x.Threshold + } + return nil +} + +// A `PerformanceThreshold` is used when each window is good when that window +// has a sufficiently high `performance`. +type WindowsBasedSli_PerformanceThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means, either a request-based SLI or a basic SLI, by which to compute + // performance over a window. + // + // Types that are assignable to Type: + // + // *WindowsBasedSli_PerformanceThreshold_Performance + // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance + Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` + // If window `performance >= threshold`, the window is counted as good. 
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *WindowsBasedSli_PerformanceThreshold) Reset() { + *x = WindowsBasedSli_PerformanceThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_PerformanceThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} + +func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok { + return x.Performance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok { + return x.BasicSliPerformance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 +} + +type isWindowsBasedSli_PerformanceThreshold_Type interface { + isWindowsBasedSli_PerformanceThreshold_Type() +} + +type WindowsBasedSli_PerformanceThreshold_Performance struct { + // `RequestBasedSli` to evaluate to judge window quality. + Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"` +} + +type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct { + // `BasicSli` to evaluate to judge window quality. + BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"` +} + +func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +// A `MetricRange` is used when each window is good when the value x of a +// single `TimeSeries` satisfies `range.min <= x <= range.max`. The provided +// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and +// `MetricKind = GAUGE`. +type WindowsBasedSli_MetricRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying the `TimeSeries` to use for evaluating window quality. + TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound + // to an infinite value. 
+ Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *WindowsBasedSli_MetricRange) Reset() { + *x = WindowsBasedSli_MetricRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_MetricRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_MetricRange) ProtoMessage() {} + +func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string { + if x != nil { + return x.TimeSeries + } + return "" +} + +func (x *WindowsBasedSli_MetricRange) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, + 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, + 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 
0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, + 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, + 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 
0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 
0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, + 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a, + 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, + 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, + 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, + 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, + 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, + 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, + 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, + 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, + 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, + 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0f, 0x44, 0x69, 
0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, + 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, + 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 
0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, + 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_proto_rawDescOnce sync.Once + 
file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_monitoring_v3_service_proto_goTypes = []any{ + (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View + (*Service)(nil), // 1: google.monitoring.v3.Service + (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective + (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator + (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli + (*Range)(nil), // 5: google.monitoring.v3.Range + (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli + (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio + (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut + (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli + (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom + (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine + (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints + (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio + (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio + (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService + (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun + (*Service_GkeNamespace)(nil), // 17: google.monitoring.v3.Service.GkeNamespace + (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload + (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService + (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService + (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry + nil, // 22: google.monitoring.v3.Service.UserLabelsEntry + nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria + (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria + (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod +} +var file_google_monitoring_v3_service_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom + 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine + 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints + 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> 
google.monitoring.v3.Service.ClusterIstio + 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio + 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService + 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun + 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace + 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload + 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService + 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService + 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry + 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry + 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator + 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration + 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod + 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli + 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli + 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli + 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria + 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria + 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio + 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut + 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range + 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration + 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration + 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli + 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli + 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> 
google.monitoring.v3.Range + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_proto_init() } +func file_google_monitoring_v3_service_proto_init() { + if File_google_monitoring_v3_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelObjective); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelIndicator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RequestBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesRatio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DistributionCut); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*Service_Custom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*Service_AppEngine); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudEndpoints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Service_ClusterIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Service_MeshIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*Service_IstioCanonicalService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudRun); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeWorkload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Service_BasicService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*Service_Telemetry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_AvailabilityCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_LatencyCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_MetricRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + 
file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{ + (*Service_Custom_)(nil), + (*Service_AppEngine_)(nil), + (*Service_CloudEndpoints_)(nil), + (*Service_ClusterIstio_)(nil), + (*Service_MeshIstio_)(nil), + (*Service_IstioCanonicalService_)(nil), + (*Service_CloudRun_)(nil), + (*Service_GkeNamespace_)(nil), + (*Service_GkeWorkload_)(nil), + (*Service_GkeService_)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{ + (*ServiceLevelObjective_RollingPeriod)(nil), + (*ServiceLevelObjective_CalendarPeriod)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{ + (*ServiceLevelIndicator_BasicSli)(nil), + (*ServiceLevelIndicator_RequestBased)(nil), + (*ServiceLevelIndicator_WindowsBased)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{ + (*BasicSli_Availability)(nil), + (*BasicSli_Latency)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{ + (*RequestBasedSli_GoodTotalRatio)(nil), + (*RequestBasedSli_DistributionCut)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{ + (*WindowsBasedSli_GoodBadMetricFilter)(nil), + (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), + (*WindowsBasedSli_MetricMeanInRange)(nil), + (*WindowsBasedSli_MetricSumInRange)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{ + (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), + (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 28, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_proto = out.File + file_google_monitoring_v3_service_proto_rawDesc = nil + file_google_monitoring_v3_service_proto_goTypes = nil + file_google_monitoring_v3_service_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go new file mode 100644 index 000000000..15e1f04d6 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -0,0 +1,1796 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `CreateService` request. +type CreateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource + // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the + // parent Metrics Scope. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The Service id to use for this Service. If omitted, an id will be + // generated instead. Must match the pattern `[a-z0-9\-]+` + ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Required. The `Service` to create. + Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *CreateServiceRequest) Reset() { + *x = CreateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceRequest) ProtoMessage() {} + +func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateServiceRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *CreateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +// The `GetService` request. +type GetServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetServiceRequest) Reset() { + *x = GetServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceRequest) ProtoMessage() {} + +func (x *GetServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead. +func (*GetServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListServices` request. +type ListServicesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed services, + // either a [project](https://cloud.google.com/monitoring/api/v3#project_name) + // or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `Service`s to return. The filter supports + // filtering on a particular service-identifier type or one of its attributes. + // + // To filter on a particular service-identifier type, the `identifier_case` + // refers to which option in the `identifier` field is populated. For example, + // the filter `identifier_case = "CUSTOM"` would match all services with a + // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE", + // "MESH_ISTIO", and the other options listed at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + // + // To filter on an attribute of a service-identifier type, apply the filter + // name by using the snake case of the service-identifier type and the + // attribute of that service-identifier type, and join the two with a period. + // For example, to filter by the `meshUid` field of the `MeshIstio` + // service-identifier type, you must filter on `mesh_istio.mesh_uid = + // "123"` to match all services with mesh UID "123". Service-identifier types + // and their attributes are described at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListServicesRequest) Reset() { + *x = ListServicesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesRequest) ProtoMessage() {} + +func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListServicesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServicesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServicesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServicesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListServices` response. +type ListServicesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `Service`s matching the specified filter. + Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServicesResponse) Reset() { + *x = ListServicesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesResponse) ProtoMessage() {} + +func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListServicesResponse) GetServices() []*Service { + if x != nil { + return x.Services + } + return nil +} + +func (x *ListServicesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateService` request. 
+type UpdateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Service` to draw updates from. + // The given `name` specifies the resource to update. + Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceRequest) Reset() { + *x = UpdateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceRequest) ProtoMessage() {} + +func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead. +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteService` request. +type DeleteServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service` to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceRequest) Reset() { + *x = DeleteServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceRequest) ProtoMessage() {} + +func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead. +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateServiceLevelObjective` request. +type CreateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The ServiceLevelObjective id to use for this + // ServiceLevelObjective. If omitted, an id will be generated instead. Must + // match the pattern `^[a-zA-Z0-9-_:.]+$` + ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` + // Required. The `ServiceLevelObjective` to create. + // The provided `name` will be respected if no `ServiceLevelObjective` exists + // with this name. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` +} + +func (x *CreateServiceLevelObjectiveRequest) Reset() { + *x = CreateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateServiceLevelObjectiveRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string { + if x != nil { + return x.ServiceLevelObjectiveId + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +// The `GetServiceLevelObjective` request. +type GetServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to get. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *GetServiceLevelObjectiveRequest) Reset() { + *x = GetServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` request. +type ListServiceLevelObjectivesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed SLOs, either a + // project or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `ServiceLevelObjective`s to return. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *ListServiceLevelObjectivesRequest) Reset() { + *x = ListServiceLevelObjectivesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesRequest) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListServiceLevelObjectivesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServiceLevelObjectivesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` response. +type ListServiceLevelObjectivesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `ServiceLevelObjective`s matching the specified filter. + ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServiceLevelObjectivesResponse) Reset() { + *x = ListServiceLevelObjectivesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesResponse) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjectives + } + return nil +} + +func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateServiceLevelObjective` request. +type UpdateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `ServiceLevelObjective` to draw updates from. + // The given `name` specifies the resource to update. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceLevelObjectiveRequest) Reset() { + *x = UpdateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteServiceLevelObjective` request. +type DeleteServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to delete. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceLevelObjectiveRequest) Reset() { + *x = DeleteServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11} +} + +func (x *DeleteServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 
0x21, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43, + 0x72, 
0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49, + 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04, + 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, + 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, + 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x1b, 
0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, + 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a, + 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x32, + 0x21, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0xda, 0x41, 0x1e, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x4d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0xc1, + 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, + 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x22, 0x85, 0x01, 0xda, 0x41, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x65, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x32, 0x4a, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, + 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd8, 0x01, 0x0a, 0x18, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, + 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_service_proto_rawDescOnce 
sync.Once + file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_google_monitoring_v3_service_service_proto_goTypes = []any{ + (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest + (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest + (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest + (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse + (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest + (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest + (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest + (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest + (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest + (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse + (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest + (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest + (*Service)(nil), // 12: google.monitoring.v3.Service + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective + (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View + (*emptypb.Empty)(nil), // 16: google.protobuf.Empty +} +var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{ + 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service + 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service + 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service + 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective + 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> 
google.monitoring.v3.CreateServiceRequest + 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest + 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest + 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest + 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest + 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest + 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest + 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest + 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest + 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest + 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service + 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service + 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse + 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service + 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty + 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse + 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty + 20, // [20:30] is the sub-list for method output_type + 10, // [10:20] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_service_proto_init() } +func file_google_monitoring_v3_service_service_proto_init() { + if File_google_monitoring_v3_service_service_proto != nil { + return + } + file_google_monitoring_v3_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v any, i int) 
any { + switch v := v.(*GetServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*GetServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_service_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_service_proto = out.File + 
file_google_monitoring_v3_service_service_proto_rawDesc = nil + file_google_monitoring_v3_service_service_proto_goTypes = nil + file_google_monitoring_v3_service_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServiceMonitoringServiceClient interface { + // Create a `Service`. + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Get the named `Service`. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Soft delete this `Service`. + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type serviceMonitoringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient { + return &serviceMonitoringServiceClient{cc} +} + +func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) { + out := new(ListServiceLevelObjectivesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service. +type ServiceMonitoringServiceServer interface { + // Create a `Service`. + CreateService(context.Context, *CreateServiceRequest) (*Service, error) + // Get the named `Service`. 
+ GetService(context.Context, *GetServiceRequest) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) + // Soft delete this `Service`. + DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) +} + +// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. +type UnimplementedServiceMonitoringServiceServer struct { +} + +func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") +} + +func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) { + s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv) +} + +func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceLevelObjectivesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.ServiceMonitoringService", + HandlerType: (*ServiceMonitoringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateService", + Handler: _ServiceMonitoringService_CreateService_Handler, + }, + { + MethodName: "GetService", + Handler: _ServiceMonitoringService_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _ServiceMonitoringService_ListServices_Handler, + }, + { + MethodName: "UpdateService", + Handler: _ServiceMonitoringService_UpdateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _ServiceMonitoringService_DeleteService_Handler, + }, + { + MethodName: "CreateServiceLevelObjective", + Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler, + }, + { + MethodName: "GetServiceLevelObjective", + Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler, + }, + { + MethodName: "ListServiceLevelObjectives", + Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler, + }, + { + MethodName: "UpdateServiceLevelObjective", + Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler, + }, + { + MethodName: "DeleteServiceLevelObjective", + Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/service_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go new file mode 100644 index 000000000..ab4986804 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -0,0 +1,315 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `Snooze` will prevent any alerts from being opened, and close any that +// are already open. The `Snooze` will work on alerts that match the +// criteria defined in the `Snooze`. The `Snooze` will be active from +// `interval.start_time` through `interval.end_time`. +type Snooze struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the `Snooze`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // + // The ID of the `Snooze` will be generated by the system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. This defines the criteria for applying the `Snooze`. See + // `Criteria` for more information. + Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"` + // Required. The `Snooze` will be active from `interval.start_time` through + // `interval.end_time`. + // `interval.start_time` cannot be in the past. There is a 15 second clock + // skew to account for the time it takes for a request to reach the API from + // the UI. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Required. A display name for the `Snooze`. This can be, at most, 512 + // unicode characters. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` +} + +func (x *Snooze) Reset() { + *x = Snooze{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze) ProtoMessage() {} + +func (x *Snooze) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze.ProtoReflect.Descriptor instead. +func (*Snooze) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0} +} + +func (x *Snooze) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snooze) GetCriteria() *Snooze_Criteria { + if x != nil { + return x.Criteria + } + return nil +} + +func (x *Snooze) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Snooze) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The +// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s +// whose names are supplied. 
+type Snooze_Criteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The specific `AlertPolicy` names for the alert that should be snoozed. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] + // + // There is a limit of 16 policies per snooze. This limit is checked during + // snooze creation. + Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *Snooze_Criteria) Reset() { + *x = Snooze_Criteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze_Criteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze_Criteria) ProtoMessage() {} + +func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead. +func (*Snooze_Criteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Snooze_Criteria) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, 0x02, 0x0a, 0x06, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 
0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x52, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x4a, 0xea, 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_snooze_proto_goTypes = []any{ + (*Snooze)(nil), // 0: google.monitoring.v3.Snooze + (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria + (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval +} +var file_google_monitoring_v3_snooze_proto_depIdxs = 
[]int32{ + 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria + 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_proto_init() } +func file_google_monitoring_v3_snooze_proto_init() { + if File_google_monitoring_v3_snooze_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Snooze); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Snooze_Criteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_snooze_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_proto = out.File + file_google_monitoring_v3_snooze_proto_rawDesc = nil + file_google_monitoring_v3_snooze_proto_goTypes = nil + file_google_monitoring_v3_snooze_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go new file mode 100644 index 000000000..39388a998 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -0,0 +1,867 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message definition for creating a `Snooze`. Users must provide the body +// of the `Snooze` to be created but must omit the `Snooze` field, `name`. +type CreateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // a `Snooze` should be created. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The `Snooze` to create. Omit the `name` field, as it will be + // filled in by the API. + Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"` +} + +func (x *CreateSnoozeRequest) Reset() { + *x = CreateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnoozeRequest) ProtoMessage() {} + +func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSnoozeRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +// The message definition for listing `Snooze`s associated with the given +// `parent`, satisfying the optional `filter`. +type ListSnoozesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // `Snooze`s should be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional filter to restrict results to the given criteria. The + // following fields are supported. 
+ // + // - `interval.start_time` + // - `interval.end_time` + // + // For example: + // + // ``` + // interval.start_time > "2022-03-11T00:00:00-08:00" AND + // interval.end_time < "2022-03-12T00:00:00-08:00" + // ``` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. The maximum number of results to return for a single query. The + // server may further constrain the maximum number of results returned in a + // single page. The value should be in the range [1, 1000]. If the value given + // is outside this range, the server will decide the number of results to be + // returned. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The `next_page_token` from a previous call to + // `ListSnoozesRequest` to get the next page of results. + PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnoozesRequest) Reset() { + *x = ListSnoozesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesRequest) ProtoMessage() {} + +func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead. +func (*ListSnoozesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListSnoozesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListSnoozesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListSnoozesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnoozesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The results of a successful `ListSnoozes` call, containing the matching +// `Snooze`s. +type ListSnoozesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Snooze`s matching this list call. + Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"` + // Page token for repeated calls to `ListSnoozes`, to fetch additional pages + // of results. If this is empty or missing, there are no more pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnoozesResponse) Reset() { + *x = ListSnoozesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesResponse) ProtoMessage() {} + +func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead. +func (*ListSnoozesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListSnoozesResponse) GetSnoozes() []*Snooze { + if x != nil { + return x.Snoozes + } + return nil +} + +func (x *ListSnoozesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The message definition for retrieving a `Snooze`. Users must specify the +// field, `name`, which identifies the `Snooze`. +type GetSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The ID of the `Snooze` to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetSnoozeRequest) Reset() { + *x = GetSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnoozeRequest) ProtoMessage() {} + +func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead. +func (*GetSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetSnoozeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The message definition for updating a `Snooze`. The field, `snooze.name` +// identifies the `Snooze` to be updated. The remainder of `snooze` gives the +// content the `Snooze` in question will be assigned. +// +// What fields can be updated depends on the start time and end time of the +// `Snooze`. +// +// - end time is in the past: These `Snooze`s are considered +// read-only and cannot be updated. +// - start time is in the past and end time is in the future: `display_name` +// and `interval.end_time` can be updated. +// - start time is in the future: `display_name`, `interval.start_time` and +// `interval.end_time` can be updated. 
+type UpdateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Snooze` to update. Must have the name field present. + Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"` + // Required. The fields to update. + // + // For each field listed in `update_mask`: + // + // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a + // value for that field, the value of the field in the existing `Snooze` + // will be set to the value of the field in the supplied `Snooze`. + // - If the field does not have a value in the supplied `Snooze`, the field + // in the existing `Snooze` is set to its default value. + // + // Fields not listed retain their existing value. + // + // The following are the field names that are accepted in `update_mask`: + // + // - `display_name` + // - `interval.start_time` + // - `interval.end_time` + // + // That said, the start time and end time of the `Snooze` determines which + // fields can legally be updated. Before attempting an update, users should + // consult the documentation for `UpdateSnoozeRequest`, which talks about + // which fields can be updated. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnoozeRequest) Reset() { + *x = UpdateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnoozeRequest) ProtoMessage() {} + +func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9, + 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 
0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e, + 
0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, + 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_service_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{ + (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest + (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest + (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse + (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest + (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest + (*Snooze)(nil), // 5: google.monitoring.v3.Snooze + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask +} +var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze + 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 4: 
google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest + 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest + 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest + 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest + 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze + 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse + 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze + 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_service_proto_init() } +func file_google_monitoring_v3_snooze_service_proto_init() { + if File_google_monitoring_v3_snooze_service_proto != nil { + return + } + file_google_monitoring_v3_snooze_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_service_proto = out.File + file_google_monitoring_v3_snooze_service_proto_rawDesc = nil + file_google_monitoring_v3_snooze_service_proto_goTypes = nil + file_google_monitoring_v3_snooze_service_proto_depIdxs = nil +} + +// Reference imports to suppress 
errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SnoozeServiceClient is the client API for SnoozeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SnoozeServiceClient interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) +} + +type snoozeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient { + return &snoozeServiceClient{cc} +} + +func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) { + out := new(ListSnoozesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnoozeServiceServer is the server API for SnoozeService service. +type SnoozeServiceServer interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. 
+ GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) +} + +// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSnoozeServiceServer struct { +} + +func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented") +} +func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented") +} + +func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) { + s.RegisterService(&_SnoozeService_serviceDesc, srv) +} + +func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnoozesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).GetSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SnoozeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.SnoozeService", + HandlerType: (*SnoozeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSnooze", + Handler: _SnoozeService_CreateSnooze_Handler, + }, + { + MethodName: "ListSnoozes", + Handler: _SnoozeService_ListSnoozes_Handler, + }, + { + MethodName: "GetSnooze", + Handler: _SnoozeService_GetSnooze_Handler, + }, + { + MethodName: "UpdateSnooze", + Handler: _SnoozeService_UpdateSnooze_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/snooze_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go new file mode 100644 index 000000000..5a55ecc66 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -0,0 +1,188 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/span_context.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The context of a span. This is attached to an +// [Exemplar][google.api.Distribution.Exemplar] +// in [Distribution][google.api.Distribution] values during aggregation. +// +// It contains the name of a span with format: +// +// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the span. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // `[TRACE_ID]` is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // `[SPAN_ID]` is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. 
+ SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` +} + +func (x *SpanContext) Reset() { + *x = SpanContext{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpanContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanContext) ProtoMessage() {} + +func (x *SpanContext) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead. +func (*SpanContext) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SpanContext) GetSpanName() string { + if x != nil { + return x.SpanName + } + return "" +} + +var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22, + 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, + 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once + file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc +) + +func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { + 
file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData) + }) + return file_google_monitoring_v3_span_context_proto_rawDescData +} + +var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_span_context_proto_goTypes = []any{ + (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext +} +var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_span_context_proto_init() } +func file_google_monitoring_v3_span_context_proto_init() { + if File_google_monitoring_v3_span_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SpanContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_span_context_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes, + }.Build() + File_google_monitoring_v3_span_context_proto = out.File + file_google_monitoring_v3_span_context_proto_rawDesc = nil + file_google_monitoring_v3_span_context_proto_goTypes = nil + file_google_monitoring_v3_span_context_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go new file mode 100644 index 000000000..e0b9e4a38 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -0,0 +1,2726 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The regions from which an Uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in Uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 + // Allows checks to run from locations within the western United States of + // America + UptimeCheckRegion_USA_OREGON UptimeCheckRegion = 5 + // Allows checks to run from locations within the central United States of + // America + UptimeCheckRegion_USA_IOWA UptimeCheckRegion = 6 + // Allows checks to run from locations within the eastern United States of + // America + UptimeCheckRegion_USA_VIRGINIA UptimeCheckRegion = 7 +) + +// Enum value maps for UptimeCheckRegion. +var ( + UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", + 5: "USA_OREGON", + 6: "USA_IOWA", + 7: "USA_VIRGINIA", + } + UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, + "USA_OREGON": 5, + "USA_IOWA": 6, + "USA_VIRGINIA": 7, + } +) + +func (x UptimeCheckRegion) Enum() *UptimeCheckRegion { + p := new(UptimeCheckRegion) + *p = x + return p +} + +func (x UptimeCheckRegion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor() +} + +func (UptimeCheckRegion) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[0] +} + +func (x UptimeCheckRegion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckRegion.Descriptor instead. +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. 
+// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. + GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +// Enum value maps for GroupResourceType. +var ( + GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", + } + GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, + } +) + +func (x GroupResourceType) Enum() *GroupResourceType { + p := new(GroupResourceType) + *p = x + return p +} + +func (x GroupResourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor() +} + +func (GroupResourceType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[1] +} + +func (x GroupResourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GroupResourceType.Descriptor instead. +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +// Operational states for an internal checker. +type InternalChecker_State int32 + +const ( + // An internal checker should never be in the unspecified state. + InternalChecker_UNSPECIFIED InternalChecker_State = 0 + // The checker is being created, provisioned, and configured. A checker in + // this state can be returned by `ListInternalCheckers` or + // `GetInternalChecker`, as well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + InternalChecker_CREATING InternalChecker_State = 1 + // The checker is running and available for use. A checker in this state + // can be returned by `ListInternalCheckers` or `GetInternalChecker` as + // well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + // If a checker is being torn down, it is neither visible nor usable, so + // there is no "deleting" or "down" state. + InternalChecker_RUNNING InternalChecker_State = 2 +) + +// Enum value maps for InternalChecker_State. 
+var ( + InternalChecker_State_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CREATING", + 2: "RUNNING", + } + InternalChecker_State_value = map[string]int32{ + "UNSPECIFIED": 0, + "CREATING": 1, + "RUNNING": 2, + } +) + +func (x InternalChecker_State) Enum() *InternalChecker_State { + p := new(InternalChecker_State) + *p = x + return p +} + +func (x InternalChecker_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor() +} + +func (InternalChecker_State) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[2] +} + +func (x InternalChecker_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use InternalChecker_State.Descriptor instead. +func (InternalChecker_State) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0} +} + +// What kind of checkers are available to be used by the check. +type UptimeCheckConfig_CheckerType int32 + +const ( + // The default checker type. Currently converted to `STATIC_IP_CHECKERS` + // on creation, the default conversion behavior may change in the future. + UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED UptimeCheckConfig_CheckerType = 0 + // `STATIC_IP_CHECKERS` are used for uptime checks that perform egress + // across the public internet. `STATIC_IP_CHECKERS` use the static IP + // addresses returned by `ListUptimeCheckIps`. + UptimeCheckConfig_STATIC_IP_CHECKERS UptimeCheckConfig_CheckerType = 1 + // `VPC_CHECKERS` are used for uptime checks that perform egress using + // Service Directory and private network access. When using `VPC_CHECKERS`, + // the monitored resource type must be `servicedirectory_service`. + UptimeCheckConfig_VPC_CHECKERS UptimeCheckConfig_CheckerType = 3 +) + +// Enum value maps for UptimeCheckConfig_CheckerType. +var ( + UptimeCheckConfig_CheckerType_name = map[int32]string{ + 0: "CHECKER_TYPE_UNSPECIFIED", + 1: "STATIC_IP_CHECKERS", + 3: "VPC_CHECKERS", + } + UptimeCheckConfig_CheckerType_value = map[string]int32{ + "CHECKER_TYPE_UNSPECIFIED": 0, + "STATIC_IP_CHECKERS": 1, + "VPC_CHECKERS": 3, + } +) + +func (x UptimeCheckConfig_CheckerType) Enum() *UptimeCheckConfig_CheckerType { + p := new(UptimeCheckConfig_CheckerType) + *p = x + return p +} + +func (x UptimeCheckConfig_CheckerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_CheckerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor() +} + +func (UptimeCheckConfig_CheckerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[3] +} + +func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead. +func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} +} + +// The HTTP request method options. +type UptimeCheckConfig_HttpCheck_RequestMethod int32 + +const ( + // No request method specified. + UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0 + // GET request. 
+ UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1 + // POST request. + UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod. +var ( + UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "GET", + 2: "POST", + } + UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "GET": 1, + "POST": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod { + p := new(UptimeCheckConfig_HttpCheck_RequestMethod) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[4] +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +// Header options corresponding to the content type of a HTTP request body. +type UptimeCheckConfig_HttpCheck_ContentType int32 + +const ( + // No content type specified. + UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0 + // `body` is in URL-encoded form. Equivalent to setting the `Content-Type` + // to `application/x-www-form-urlencoded` in the HTTP request. + UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1 + // `body` is in `custom_content_type` form. Equivalent to setting the + // `Content-Type` to the contents of `custom_content_type` in the HTTP + // request. + UptimeCheckConfig_HttpCheck_USER_PROVIDED UptimeCheckConfig_HttpCheck_ContentType = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType. +var ( + UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "URL_ENCODED", + 2: "USER_PROVIDED", + } + UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "URL_ENCODED": 1, + "USER_PROVIDED": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType { + p := new(UptimeCheckConfig_HttpCheck_ContentType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[5] +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. 
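
// Editor's note: the sketch below is illustrative material added during this
// review and is NOT part of the protoc-generated file. It shows one way the
// RequestMethod and ContentType enums above might be combined on the
// HttpCheck message defined later in this file, respecting the constraints
// documented on `content_type` (a POST with `USER_PROVIDED` content type must
// also set `custom_content_type`, and a GET must not carry a body). The
// function name and literal values are placeholders.
func editorExampleHTTPSPostCheck() *UptimeCheckConfig_HttpCheck {
	return &UptimeCheckConfig_HttpCheck{
		RequestMethod:     UptimeCheckConfig_HttpCheck_POST,
		UseSsl:            true, // probe over HTTPS
		ValidateSsl:       true, // also validate the certificate chain
		Path:              "/healthz",
		Port:              443,
		ContentType:       UptimeCheckConfig_HttpCheck_USER_PROVIDED,
		CustomContentType: "application/json",
		Body:              []byte(`{"ping":true}`),
	}
}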
+func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +// An HTTP status code class. +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass int32 + +const ( + // Default value that matches no status codes. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 0 + // The class of status codes between 100 and 199. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_1XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 100 + // The class of status codes between 200 and 299. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 200 + // The class of status codes between 300 and 399. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_3XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 300 + // The class of status codes between 400 and 499. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_4XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 400 + // The class of status codes between 500 and 599. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_5XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 500 + // The class of all status codes. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_ANY UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 1000 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass. +var ( + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_name = map[int32]string{ + 0: "STATUS_CLASS_UNSPECIFIED", + 100: "STATUS_CLASS_1XX", + 200: "STATUS_CLASS_2XX", + 300: "STATUS_CLASS_3XX", + 400: "STATUS_CLASS_4XX", + 500: "STATUS_CLASS_5XX", + 1000: "STATUS_CLASS_ANY", + } + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_value = map[string]int32{ + "STATUS_CLASS_UNSPECIFIED": 0, + "STATUS_CLASS_1XX": 100, + "STATUS_CLASS_2XX": 200, + "STATUS_CLASS_3XX": 300, + "STATUS_CLASS_4XX": 400, + "STATUS_CLASS_5XX": 500, + "STATUS_CLASS_ANY": 1000, + } +) + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Enum() *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + p := new(UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[6].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[6] +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0} +} + +// Type of authentication. 
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32 + +const ( + // Default value, will result in OIDC Authentication. + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0 + // OIDC Authentication + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType. +var ( + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{ + 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED", + 1: "OIDC_TOKEN", + } + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{ + "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0, + "OIDC_TOKEN": 1, + } +) + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[7] +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0} +} + +// Options to perform content matching. +type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32 + +const ( + // No content matcher type specified (maintained for backward + // compatibility, but deprecated for future use). + // Treated as `CONTAINS_STRING`. + UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0 + // Selects substring matching. The match succeeds if the output contains + // the `content` string. This is the default value for checks without + // a `matcher` option, or where the value of `matcher` is + // `CONTENT_MATCHER_OPTION_UNSPECIFIED`. + UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1 + // Selects negation of substring matching. The match succeeds if the + // output does _NOT_ contain the `content` string. + UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2 + // Selects regular-expression matching. 
The match succeeds if the output + // matches the regular expression specified in the `content` string. + // Regex matching is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3 + // Selects negation of regular-expression matching. The match succeeds if + // the output does _NOT_ match the regular expression specified in the + // `content` string. Regex matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4 + // Selects JSONPath matching. See `JsonPathMatcher` for details on when + // the match succeeds. JSONPath matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 5 + // Selects JSONPath matching. See `JsonPathMatcher` for details on when + // the match succeeds. Succeeds when output does _NOT_ match as specified. + // JSONPath is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 6 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption. +var ( + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{ + 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED", + 1: "CONTAINS_STRING", + 2: "NOT_CONTAINS_STRING", + 3: "MATCHES_REGEX", + 4: "NOT_MATCHES_REGEX", + 5: "MATCHES_JSON_PATH", + 6: "NOT_MATCHES_JSON_PATH", + } + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{ + "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0, + "CONTAINS_STRING": 1, + "NOT_CONTAINS_STRING": 2, + "MATCHES_REGEX": 3, + "NOT_MATCHES_REGEX": 4, + "MATCHES_JSON_PATH": 5, + "NOT_MATCHES_JSON_PATH": 6, + } +) + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[8] +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +// Options to perform JSONPath content matching. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption int32 + +const ( + // No JSONPath matcher type specified (not valid). + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 0 + // Selects 'exact string' matching. The match succeeds if the content at + // the `json_path` within the output is exactly the same as the + // `content` string. 
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 1 + // Selects regular-expression matching. The match succeeds if the + // content at the `json_path` within the output matches the regular + // expression specified in the `content` string. + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_REGEX_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 2 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption. +var ( + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_name = map[int32]string{ + 0: "JSON_PATH_MATCHER_OPTION_UNSPECIFIED", + 1: "EXACT_MATCH", + 2: "REGEX_MATCH", + } + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_value = map[string]int32{ + "JSON_PATH_MATCHER_OPTION_UNSPECIFIED": 0, + "EXACT_MATCH": 1, + "REGEX_MATCH": 2, + } +) + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[9] +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0} +} + +// An internal checker allows Uptime checks to run on private/internal GCP +// resources. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +type InternalChecker struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique resource name for this InternalChecker. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for + // the Uptime check config associated with the internal checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The checker's human-readable name. The display name + // should be unique within a Cloud Monitoring Metrics Scope in order to make + // it easier to identify; however, uniqueness is not enforced. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the + // internal resource lives (ex: "default"). + Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // The GCP zone the Uptime check should egress from. 
Only respected for + // internal Uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` + // The GCP project ID where the internal checker lives. Not necessary + // the same as the Metrics Scope project. + PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` + // The current operational state of the internal checker. + State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"` +} + +func (x *InternalChecker) Reset() { + *x = InternalChecker{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalChecker) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalChecker) ProtoMessage() {} + +func (x *InternalChecker) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead. +func (*InternalChecker) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +func (x *InternalChecker) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InternalChecker) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *InternalChecker) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *InternalChecker) GetGcpZone() string { + if x != nil { + return x.GcpZone + } + return "" +} + +func (x *InternalChecker) GetPeerProjectId() string { + if x != nil { + return x.PeerProjectId + } + return "" +} + +func (x *InternalChecker) GetState() InternalChecker_State { + if x != nil { + return x.State + } + return InternalChecker_UNSPECIFIED +} + +// Describes a Synthetic Monitor to be invoked by Uptime. +type SyntheticMonitorTarget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a Synthetic Monitor's execution stack. + // + // Types that are assignable to Target: + // + // *SyntheticMonitorTarget_CloudFunctionV2 + Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"` +} + +func (x *SyntheticMonitorTarget) Reset() { + *x = SyntheticMonitorTarget{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget) ProtoMessage() {} + +func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead. 
+func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target { + if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok { + return x.CloudFunctionV2 + } + return nil +} + +type isSyntheticMonitorTarget_Target interface { + isSyntheticMonitorTarget_Target() +} + +type SyntheticMonitorTarget_CloudFunctionV2 struct { + // Target a Synthetic Monitor GCFv2 instance. + CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"` +} + +func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {} + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. A unique resource name for this Uptime check configuration. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the + // Uptime check. + // + // This field should be omitted when creating the Uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the Uptime check configuration. The display name + // should be unique within a Cloud Monitoring Workspace in order to make it + // easier to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are assignable to Resource: + // + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + // *UptimeCheckConfig_SyntheticMonitor + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of Uptime check request. + // + // Types that are assignable to CheckRequestType: + // + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the Uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `60s`. + Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The content that is expected to appear in the data returned by the target + // server against which the check is run. Currently, only the first entry + // in the `content_matchers` list is supported, and additional entries will + // be ignored. This field is optional and should only be specified if a + // content match is required as part of the/ Uptime check. 
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The type of checkers to use to execute the Uptime check. + CheckerType UptimeCheckConfig_CheckerType `protobuf:"varint,17,opt,name=checker_type,json=checkerType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_CheckerType" json:"checker_type,omitempty"` + // The list of regions from which the check will be run. + // Some regions contain one location, and others contain more than one. + // If this field is specified, enough regions must be provided to include a + // minimum of 3 locations. Not specifying this field will result in Uptime + // checks running from all available regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // If this is `true`, then checks are made only from the 'internal_checkers'. + // If it is `false`, then checks are made only from the 'selected_regions'. + // It is an error to provide 'selected_regions' when is_internal is `true`, + // or to provide 'internal_checkers' when is_internal is `false`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` + // The internal checkers that this check will egress from. If `is_internal` is + // `true` and this list is empty, the check will egress from all the + // InternalCheckers configured for the project that owns this + // `UptimeCheckConfig`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `UptimeCheckConfig` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,20,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UptimeCheckConfig) Reset() { + *x = UptimeCheckConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. 
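
// Editor's note: illustrative sketch added during review, NOT part of the
// generated file. It assembles an UptimeCheckConfig for a public `uptime_url`
// target using the oneof wrapper types declared later in this file; `name` is
// omitted because the server assigns it on create. The project ID, host, and
// label keys are placeholders chosen for the example.
func editorExampleUptimeCheckConfig() *UptimeCheckConfig {
	return &UptimeCheckConfig{
		DisplayName: "example-https-uptime-check",
		Resource: &UptimeCheckConfig_MonitoredResource{
			MonitoredResource: &monitoredres.MonitoredResource{
				Type: "uptime_url",
				Labels: map[string]string{
					"project_id": "my-project",  // placeholder
					"host":       "example.com", // placeholder
				},
			},
		},
		CheckRequestType: &UptimeCheckConfig_HttpCheck_{
			HttpCheck: &UptimeCheckConfig_HttpCheck{UseSsl: true, Path: "/", Port: 443},
		},
		Period:  &durationpb.Duration{Seconds: 60}, // one of the supported cadences
		Timeout: &durationpb.Duration{Seconds: 10}, // must be between 1 and 60 seconds
		ContentMatchers: []*UptimeCheckConfig_ContentMatcher{
			{Content: "ok", Matcher: UptimeCheckConfig_ContentMatcher_CONTAINS_STRING},
		},
	}
}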
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} +} + +func (x *UptimeCheckConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UptimeCheckConfig) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget { + if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok { + return x.SyntheticMonitor + } + return nil +} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if x != nil { + return x.ContentMatchers + } + return nil +} + +func (x *UptimeCheckConfig) GetCheckerType() UptimeCheckConfig_CheckerType { + if x != nil { + return x.CheckerType + } + return UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if x != nil { + return x.SelectedRegions + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetIsInternal() bool { + if x != nil { + return x.IsInternal + } + return false +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if x != nil { + return x.InternalCheckers + } + return nil +} + +func (x *UptimeCheckConfig) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + // The [monitored + // resource](https://cloud.google.com/monitoring/api/resources) associated + // with the configuration. 
+ // The following monitored resource types are valid for this field: + // + // `uptime_url`, + // `gce_instance`, + // `gae_app`, + // `aws_ec2_instance`, + // `aws_elb_load_balancer` + // `k8s_service` + // `servicedirectory_service` + // `cloud_run_revision` + MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` +} + +type UptimeCheckConfig_ResourceGroup_ struct { + // The group resource associated with the configuration. + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` +} + +type UptimeCheckConfig_SyntheticMonitor struct { + // Specifies a Synthetic Monitor to invoke. + SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {} + +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_HttpCheck_ struct { + // Contains information needed to make an HTTP or HTTPS check. + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` +} + +type UptimeCheckConfig_TcpCheck_ struct { + // Contains information needed to make a TCP check. + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A broad region category in which the IP address is located. + Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The IP address from which the Uptime check originates. This is a fully + // specified IP address (not an IP address range). Most IP addresses, as of + // this publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely, and should support + // interpreting this field in either IPv4 or IPv6 format. 
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` +} + +func (x *UptimeCheckIp) Reset() { + *x = UptimeCheckIp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckIp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckIp) ProtoMessage() {} + +func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead. +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3} +} + +func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if x != nil { + return x.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (x *UptimeCheckIp) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *UptimeCheckIp) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +// A Synthetic Monitor deployed to a Cloud Functions V2 instance. +type SyntheticMonitorTarget_CloudFunctionV2Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Fully qualified GCFv2 resource name + // i.e. `projects/{project}/locations/{location}/functions/{function}` + // Required. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The `cloud_run_revision` Monitored Resource associated with + // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and + // spans) are reported against this Monitored Resource. This field is output + // only. + CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"` +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() { + *x = SyntheticMonitorTarget_CloudFunctionV2Target{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead. 
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource { + if x != nil { + return x.CloudRunRevision + } + return nil +} + +// The resource submessage for group checks. It can be used instead of a +// monitored resource, when multiple resources are being monitored. +type UptimeCheckConfig_ResourceGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The group of resources being monitored. Should be only the `[GROUP_ID]`, + // and not the full-path + // `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + // The resource type of the group members. + ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` +} + +func (x *UptimeCheckConfig_ResourceGroup) Reset() { + *x = UptimeCheckConfig_ResourceGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ResourceGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} + +func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string { + if x != nil { + return x.GroupId + } + return "" +} + +func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { + if x != nil { + return x.ResourceType + } + return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED +} + +// Information involved in sending ICMP pings alongside public HTTP/TCP +// checks. For HTTP, the pings are performed for each part of the redirect +// chain. +type UptimeCheckConfig_PingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. 
+ PingsCount int32 `protobuf:"varint,1,opt,name=pings_count,json=pingsCount,proto3" json:"pings_count,omitempty"` +} + +func (x *UptimeCheckConfig_PingConfig) Reset() { + *x = UptimeCheckConfig_PingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_PingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_PingConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 { + if x != nil { + return x.PingsCount + } + return 0 +} + +// Information involved in an HTTP/HTTPS Uptime check request. +type UptimeCheckConfig_HttpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP request method to use for the check. If set to + // `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`. + RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"` + // If `true`, use HTTPS instead of HTTP to run the check. + UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + // Optional (defaults to "/"). The path to the page against which to run + // the check. Will be combined with the `host` (specified within the + // `monitored_resource`) and `port` to construct the full URL. If the + // provided path does not begin with "/", a "/" will be prepended + // automatically. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when + // `use_ssl` is `true`). The TCP port on the HTTP server against which to + // run the check. Will be combined with host (specified within the + // `monitored_resource`) and `path` to construct the full URL. + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + // Do not set both `auth_method` and `auth_info`. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` + // Boolean specifying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. 
+ // On Get/List calls, if `mask_headers` is set to `true` then the headers + // will be obscured with `******.` + MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"` + // The list of headers to send as part of the Uptime check request. + // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). + // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + // The maximum number of headers allowed is 100. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The content type header to use for the check. The following + // configurations result in errors: + // 1. Content type is specified in both the `headers` field and the + // `content_type` field. + // 2. Request method is `GET` and `content_type` is not `TYPE_UNSPECIFIED` + // 3. Request method is `POST` and `content_type` is `TYPE_UNSPECIFIED`. + // 4. Request method is `POST` and a "Content-Type" header is provided via + // `headers` field. The `content_type` field should be used instead. + ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"` + // A user provided content type header to use for the check. The invalid + // configurations outlined in the `content_type` field apply to + // `custom_content_type`, as well as the following: + // 1. `content_type` is `URL_ENCODED` and `custom_content_type` is set. + // 2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not + // set. + CustomContentType string `protobuf:"bytes,13,opt,name=custom_content_type,json=customContentType,proto3" json:"custom_content_type,omitempty"` + // Boolean specifying whether to include SSL certificate validation as a + // part of the Uptime check. Only applies to checks where + // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, + // setting `validate_ssl` to `true` has no effect. + ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"` + // The request body associated with the HTTP POST request. If `content_type` + // is `URL_ENCODED`, the body passed in must be URL-encoded. Users can + // provide a `Content-Length` header via the `headers` field or the API will + // do so. If the `request_method` is `GET` and `body` is not empty, the API + // will return an error. The maximum byte size is 1 megabyte. + // + // Note: If client libraries aren't used (which performs the conversion + // automatically) base64 encode your `body` data since the field is of + // `bytes` type. + Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"` + // If present, the check will only pass if the HTTP response status code is + // in this set of status codes. If empty, the HTTP status code will only + // pass if the HTTP status code is 200-299. 
+ AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"` + // Contains information needed to add pings to an HTTP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` + // This field is optional and should be set only by users interested in + // an authenticated uptime check. + // Do not set both `auth_method` and `auth_info`. + // + // Types that are assignable to AuthMethod: + // + // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ + AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"` +} + +func (x *UptimeCheckConfig_HttpCheck) Reset() { + *x = UptimeCheckConfig_HttpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { + if x != nil { + return x.RequestMethod + } + return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if x != nil { + return x.UseSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if x != nil { + return x.AuthInfo + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if x != nil { + return x.MaskHeaders + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType { + if x != nil { + return x.ContentType + } + return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetCustomContentType() string { + if x != nil { + return x.CustomContentType + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool { + if x != nil { + return x.ValidateSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetAcceptedResponseStatusCodes() []*UptimeCheckConfig_HttpCheck_ResponseStatusCode { + if x != nil { + return x.AcceptedResponseStatusCodes + } + return nil +} + +func (x 
*UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod { + if m != nil { + return m.AuthMethod + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication { + if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok { + return x.ServiceAgentAuthentication + } + return nil +} + +type isUptimeCheckConfig_HttpCheck_AuthMethod interface { + isUptimeCheckConfig_HttpCheck_AuthMethod() +} + +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct { + // If specified, Uptime will generate and attach an OIDC JWT token for the + // Monitoring service agent service account as an `Authorization` header + // in the HTTP request when probing. + ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() { +} + +// Information required for a TCP Uptime check request. +type UptimeCheckConfig_TcpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TCP port on the server against which to run the check. Will be + // combined with host (specified within the `monitored_resource`) to + // construct the full URL. Required. + Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + // Contains information needed to add pings to a TCP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,2,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` +} + +func (x *UptimeCheckConfig_TcpCheck) Reset() { + *x = UptimeCheckConfig_TcpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_TcpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3} +} + +func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_TcpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +// Optional. Used to perform content matching. This allows matching based on +// substrings and regular expressions, together with their negations. Only the +// first 4 MB of an HTTP or HTTPS check's response (and the first +// 1 MB of a TCP check's response) are examined for purposes of content +// matching. 
+type UptimeCheckConfig_ContentMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // String, regex or JSON content to match. Maximum 1024 bytes. An empty + // `content` string indicates no content matching is to be performed. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The type of content matcher that will be applied to the server output, + // compared to the `content` string when the check is run. + Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"` + // Certain `ContentMatcherOption` types require additional information. + // `MATCHES_JSON_PATH` or `NOT_MATCHES_JSON_PATH` require a + // `JsonPathMatcher`; not used for other options. + // + // Types that are assignable to AdditionalMatcherInfo: + // + // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ + AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"` +} + +func (x *UptimeCheckConfig_ContentMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
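
// Editor's note: illustrative sketch added during review, NOT part of the
// generated file. A minimal ContentMatcher that fails the check whenever the
// response body matches an error pattern, using only the `content` and
// `matcher` fields documented above; regex matching applies to HTTP/HTTPS
// checks only, and the pattern here is an arbitrary example.
func editorExampleErrorPatternMatcher() *UptimeCheckConfig_ContentMatcher {
	return &UptimeCheckConfig_ContentMatcher{
		Content: `"status"\s*:\s*"error"`,
		Matcher: UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX,
	}
}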
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4} +} + +func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + if x != nil { + return x.Matcher + } + return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED +} + +func (m *UptimeCheckConfig_ContentMatcher) GetAdditionalMatcherInfo() isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo { + if m != nil { + return m.AdditionalMatcherInfo + } + return nil +} + +func (x *UptimeCheckConfig_ContentMatcher) GetJsonPathMatcher() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher { + if x, ok := x.GetAdditionalMatcherInfo().(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_); ok { + return x.JsonPathMatcher + } + return nil +} + +type isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo interface { + isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() +} + +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ struct { + // Matcher information for `MATCHES_JSON_PATH` and `NOT_MATCHES_JSON_PATH` + JsonPathMatcher *UptimeCheckConfig_ContentMatcher_JsonPathMatcher `protobuf:"bytes,3,opt,name=json_path_matcher,json=jsonPathMatcher,proto3,oneof"` +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_) isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() { +} + +// The authentication parameters to provide to the specified resource or +// URL that requires a username and password. Currently, only +// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is +// supported in Uptime checks. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The username to use when authenticating with the HTTP server. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to use when authenticating with the HTTP server. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +// A status to accept. Either a status code class like "2xx", or an integer +// status code like "200". +type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Either a specific value or a class of status codes. + // + // Types that are assignable to StatusCode: + // + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ + StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"` +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() { + *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode { + if m != nil { + return m.StatusCode + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusValue() int32 { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue); ok { + return x.StatusValue + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusClass() UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_); ok { + return x.StatusClass + } + return UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED +} + +type isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode interface { + isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue struct { + // A status code to accept. + StatusValue int32 `protobuf:"varint,1,opt,name=status_value,json=statusValue,proto3,oneof"` +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ struct { + // A class of status codes to accept. 
+ StatusClass UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass `protobuf:"varint,2,opt,name=status_class,json=statusClass,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// The OIDC token will be generated for the Monitoring service agent service +// account. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of authentication. + Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + if x != nil { + return x.Type + } + return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED +} + +// Information needed to perform a JSONPath content match. +// Used for `ContentMatcherOption::MATCHES_JSON_PATH` and +// `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // JSONPath within the response output pointing to the expected + // `ContentMatcher::content` to match against. 
+ JsonPath string `protobuf:"bytes,1,opt,name=json_path,json=jsonPath,proto3" json:"json_path,omitempty"` + // The type of JSONPath match that will be applied to the JSON output + // (`ContentMatcher.content`) + JsonMatcher UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption `protobuf:"varint,2,opt,name=json_matcher,json=jsonMatcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption" json:"json_matcher,omitempty"` +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string { + if x != nil { + return x.JsonPath + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonMatcher() UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + if x != nil { + return x.JsonMatcher + } + return UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED +} + +var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, + 0x19, 0x0a, 0x08, 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, + 0x02, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, + 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x32, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 
0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, + 0x65, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, + 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, + 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, + 0x74, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x64, 0x12, 0x4c, 
0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x1a, 0xef, 0x0e, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 
0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, + 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 
0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, + 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, + 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, + 0x1e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, + 0x68, 
0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x31, 0x0a, 0x2d, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x10, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, + 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, + 0x44, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x84, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x11, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, + 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0f, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x1a, 0x94, 0x02, 0x0a, 0x0f, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x7f, 0x0a, 0x0c, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x22, 0x63, 0x0a, 0x15, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x24, + 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, + 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x41, 0x43, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x47, 0x45, 0x58, + 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x26, 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, + 0x43, 0x48, 0x45, 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, + 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, + 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, + 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, + 0x48, 0x10, 0x06, 0x42, 0x19, 0x0a, 0x17, 0x61, 0x64, 
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x3d, + 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x55, 0x0a, + 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, + 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, + 0x41, 0x54, 0x49, 0x43, 0x5f, 0x49, 0x50, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x56, 0x50, 0x43, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, + 0x52, 0x53, 0x10, 0x03, 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, + 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, + 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x11, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, + 0x0d, 0x53, 0x4f, 0x55, 0x54, 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, + 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x5f, 0x4f, 0x52, 0x45, 0x47, 0x4f, 0x4e, + 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x53, 0x41, 0x5f, 0x49, 0x4f, 0x57, 0x41, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x41, 0x5f, 0x56, 0x49, 0x52, 0x47, 0x49, 0x4e, 0x49, 0x41, + 0x10, 0x07, 0x2a, 0x5b, 0x0a, 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, + 0x43, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, + 0xaf, 0x02, 0xea, 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_monitoring_v3_uptime_proto_goTypes = []any{ + (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion + (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType + (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State + (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType + (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker + (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget + (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig + (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp + (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup + (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig + (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck + (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck + (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher + nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: 
google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration +} +var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ + 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State + 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource + 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup + 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget + 17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck + 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck + 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration + 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration + 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher + 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType + 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion + 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker + 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion + 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource + 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType + 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> 
google.monitoring.v3.UptimeCheckConfig.PingConfig + 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_uptime_proto_init() } +func file_google_monitoring_v3_uptime_proto_init() { + if File_google_monitoring_v3_uptime_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*InternalChecker); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckIp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget_CloudFunctionV2Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ResourceGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_PingConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_TcpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ResponseStatusCode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{ + (*SyntheticMonitorTarget_CloudFunctionV2)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_SyntheticMonitor)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{ + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil), + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, + NumEnums: 10, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_uptime_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_proto = out.File + file_google_monitoring_v3_uptime_proto_rawDesc = nil + file_google_monitoring_v3_uptime_proto_goTypes = nil + file_google_monitoring_v3_uptime_proto_depIdxs = nil +} diff --git 
a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go new file mode 100644 index 000000000..d4ba902fb --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -0,0 +1,1226 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // Uptime check configurations are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // If provided, this field specifies the criteria that must be met by + // uptime checks to be included in the response. + // + // For more details, see [Filtering + // syntax](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax). + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckConfigsRequest) Reset() { + *x = ListUptimeCheckConfigsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListUptimeCheckConfigsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckConfigsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned Uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of Uptime check configurations for the project, + // irrespective of any pagination. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListUptimeCheckConfigsResponse) Reset() { + *x = ListUptimeCheckConfigsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfigs + } + return nil +} + +func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetUptimeCheckConfigRequest) Reset() { + *x = GetUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the Uptime check. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The new Uptime check configuration. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *CreateUptimeCheckConfigRequest) Reset() { + *x = CreateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateUptimeCheckConfigRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `UpdateUptimeCheckConfig` request. +type UpdateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. If present, only the listed fields in the current Uptime check + // configuration are updated with values from the new configuration. If this + // field is empty, then the current configuration is completely replaced with + // the new configuration. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. If an `updateMask` has been specified, this field gives + // the values for the set of fields mentioned in the `updateMask`. If an + // `updateMask` has not been given, this Uptime check configuration replaces + // the current configuration. If a field is mentioned in `updateMask` but + // the corresponding field is omitted in this partial Uptime check + // configuration, it has the effect of deleting/clearing the field from the + // configuration on the server. + // + // The following fields can be updated: `display_name`, + // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and + // `selected_regions`. 
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *UpdateUptimeCheckConfigRequest) Reset() { + *x = UpdateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteUptimeCheckConfigRequest) Reset() { + *x = DeleteUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. 
If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsRequest) Reset() { + *x = ListUptimeCheckIpsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckIpsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsResponse) Reset() { + *x = ListUptimeCheckIpsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if x != nil { + return x.UptimeCheckIps + } + return nil +} + +func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 
0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, + 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, + 
0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, + 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x49, 0x70, 0x73, 
0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a, + 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x39, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x64, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 
0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x2a, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x71, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x55, + 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c, + 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, + 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, + 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, + 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, + 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_service_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{ + (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest + 
(*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse + (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest + (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest + (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest + (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest + (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest + (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse + (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig + (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask + (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig + 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp + 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest + 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest + 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest + 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest + 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest + 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest + 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse + 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_uptime_service_proto_init() } +func file_google_monitoring_v3_uptime_service_proto_init() { + if File_google_monitoring_v3_uptime_service_proto != nil { + return + } + file_google_monitoring_v3_uptime_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_service_proto = out.File + file_google_monitoring_v3_uptime_service_proto_rawDesc = nil + file_google_monitoring_v3_uptime_service_proto_goTypes = nil + file_google_monitoring_v3_uptime_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion6 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedUptimeCheckServiceServer struct { +} + +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented") +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: 
_UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go new file mode 100644 index 000000000..6f7fe5d7c --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -0,0 +1,618 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newNotificationChannelClientHook clientHook + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. +type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption + SendNotificationChannelVerificationCode []gax.CallOption + GetNotificationChannelVerificationCode []gax.CallOption + VerifyNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannelDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * 
time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListNotificationChannels: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + SendNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + VerifyNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalNotificationChannelClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator + GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) + ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator + GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error + SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error + GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) + VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) +} + +// NotificationChannelClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +type NotificationChannelClient struct { + // The internal transport-dependent client. + internalClient internalNotificationChannelClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. 
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...) +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...) +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +// To list the types of notification channels that are supported, use +// the ListNotificationChannelDescriptors method. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + return c.internalClient.ListNotificationChannels(ctx, req, opts...) +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.GetNotificationChannel(ctx, req, opts...) +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or PagerDuty service. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.CreateNotificationChannel(ctx, req, opts...) +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.UpdateNotificationChannel(ctx, req, opts...) +} + +// DeleteNotificationChannel deletes a notification channel. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. 
This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationChannel(ctx, req, opts...) +} + +// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code +// can then be supplied in VerifyNotificationChannel to verify the channel. +func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...) +} + +// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then +// be used in a call to VerifyNotificationChannel() on a different channel +// with an equivalent identity in the same or in a different project. This +// makes it possible to copy a channel between projects without requiring +// manual reverification of the channel. If the channel is not in the +// verified state, this method will fail (in other words, this may only be +// used if the SendNotificationChannelVerificationCode and +// VerifyNotificationChannel paths have already been used to put the given +// channel into the verified state). +// +// There is no guarantee that the verification codes returned by this method +// will be of a similar structure or form as the ones that are delivered +// to the channel via SendNotificationChannelVerificationCode; while +// VerifyNotificationChannel() will recognize both the codes delivered via +// SendNotificationChannelVerificationCode() and returned from +// GetNotificationChannelVerificationCode(), it is typically the case that +// the verification codes delivered via +// SendNotificationChannelVerificationCode() will be shorter and also +// have a shorter expiration (e.g. codes such as “G-123456”) whereas +// GetVerificationCode() will typically return a much longer, websafe base +// 64 encoded string that has a longer expiration time. +func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...) +} + +// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code +// delivered to the channel as a result of calling +// SendNotificationChannelVerificationCode. +func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.VerifyNotificationChannel(ctx, req, opts...) +} + +// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type notificationChannelGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing NotificationChannelClient + CallOptions **NotificationChannelCallOptions + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewNotificationChannelClient creates a new notification channel service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + clientOpts := defaultNotificationChannelGRPCClientOptions() + if newNotificationChannelClientHook != nil { + hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()} + + c := &notificationChannelGRPCClient{ + connPool: connPool, + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *notificationChannelGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...) + it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + resp := &monitoringpb.ListNotificationChannelsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...) + var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...) 
+ var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go new file mode 100644 index 000000000..3c111637e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -0,0 +1,233 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" +) + +var newQueryClientHook clientHook + +// QueryCallOptions contains the retry settings for each method of QueryClient. +type QueryCallOptions struct { + QueryTimeSeries []gax.CallOption +} + +func defaultQueryGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultQueryCallOptions() *QueryCallOptions { + return &QueryCallOptions{ + QueryTimeSeries: []gax.CallOption{}, + } +} + +// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API. +type internalQueryClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator +} + +// QueryClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. +type QueryClient struct { + // The internal transport-dependent client. 
+ internalClient internalQueryClient + + // The call options for this service. + CallOptions *QueryCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *QueryClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *QueryClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *QueryClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// QueryTimeSeries queries time series using Monitoring Query Language. +func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + return c.internalClient.QueryTimeSeries(ctx, req, opts...) +} + +// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type queryGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing QueryClient + CallOptions **QueryCallOptions + + // The gRPC API client. + queryClient monitoringpb.QueryServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewQueryClient creates a new query service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. +func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) { + clientOpts := defaultQueryGRPCClientOptions() + if newQueryClientHook != nil { + hookOpts, err := newQueryClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := QueryClient{CallOptions: defaultQueryCallOptions()} + + c := &queryGRPCClient{ + connPool: connPool, + queryClient: monitoringpb.NewQueryServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *queryGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *queryGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...) + it := &TimeSeriesDataIterator{} + req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) { + resp := &monitoringpb.QueryTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.queryClient.QueryTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go new file mode 100644 index 000000000..7776c425f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -0,0 +1,565 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newServiceMonitoringClientHook clientHook + +// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient. +type ServiceMonitoringCallOptions struct { + CreateService []gax.CallOption + GetService []gax.CallOption + ListServices []gax.CallOption + UpdateService []gax.CallOption + DeleteService []gax.CallOption + CreateServiceLevelObjective []gax.CallOption + GetServiceLevelObjective []gax.CallOption + ListServiceLevelObjectives []gax.CallOption + UpdateServiceLevelObjective []gax.CallOption + DeleteServiceLevelObjective []gax.CallOption +} + +func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions { + return &ServiceMonitoringCallOptions{ + CreateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServices: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServiceLevelObjectives: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, 
gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API. +type internalServiceMonitoringClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator + UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error + CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator + UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error +} + +// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +type ServiceMonitoringClient struct { + // The internal transport-dependent client. + internalClient internalServiceMonitoringClient + + // The call options for this service. + CallOptions *ServiceMonitoringCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ServiceMonitoringClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateService create a Service. +func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.CreateService(ctx, req, opts...) +} + +// GetService get the named Service. +func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.GetService(ctx, req, opts...) +} + +// ListServices list Services for this Metrics Scope. +func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + return c.internalClient.ListServices(ctx, req, opts...) +} + +// UpdateService update this Service. +func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.UpdateService(ctx, req, opts...) +} + +// DeleteService soft delete this Service. +func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteService(ctx, req, opts...) +} + +// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service. +func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...) +} + +// GetServiceLevelObjective get a ServiceLevelObjective by name. +func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.GetServiceLevelObjective(ctx, req, opts...) +} + +// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service. +func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...) +} + +// UpdateServiceLevelObjective update the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...) +} + +// DeleteServiceLevelObjective delete the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...) +} + +// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type serviceMonitoringGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing ServiceMonitoringClient + CallOptions **ServiceMonitoringCallOptions + + // The gRPC API client. + serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { + clientOpts := defaultServiceMonitoringGRPCClientOptions() + if newServiceMonitoringClientHook != nil { + hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()} + + c := &serviceMonitoringGRPCClient{ + connPool: connPool, + serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *serviceMonitoringGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...) + it := &ServiceIterator{} + req = proto.Clone(req).(*monitoringpb.ListServicesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) { + resp := &monitoringpb.ListServicesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServices(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServices(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteService(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...) 
+ it := &ServiceLevelObjectiveIterator{} + req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) { + resp := &monitoringpb.ListServiceLevelObjectivesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServiceLevelObjectives(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go new file mode 100644 index 000000000..32cad577e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -0,0 +1,343 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newSnoozeClientHook clientHook + +// SnoozeCallOptions contains the retry settings for each method of SnoozeClient. +type SnoozeCallOptions struct { + CreateSnooze []gax.CallOption + ListSnoozes []gax.CallOption + GetSnooze []gax.CallOption + UpdateSnooze []gax.CallOption +} + +func defaultSnoozeGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSnoozeCallOptions() *SnoozeCallOptions { + return &SnoozeCallOptions{ + CreateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + ListSnoozes: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API. +type internalSnoozeClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator + GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) +} + +// SnoozeClient is a client for interacting with Cloud Monitoring API. 
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +type SnoozeClient struct { + // The internal transport-dependent client. + internalClient internalSnoozeClient + + // The call options for this service. + CallOptions *SnoozeCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SnoozeClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SnoozeClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSnooze creates a Snooze that will prevent alerts, which match the provided +// criteria, from being opened. The Snooze applies for a specific time +// interval. +func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.CreateSnooze(ctx, req, opts...) +} + +// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in +// filter, which specifies predicates to match Snoozes. +func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + return c.internalClient.ListSnoozes(ctx, req, opts...) +} + +// GetSnooze retrieves a Snooze by name. +func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.GetSnooze(ctx, req, opts...) +} + +// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the +// given Snooze object. +func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.UpdateSnooze(ctx, req, opts...) +} + +// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type snoozeGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing SnoozeClient + CallOptions **SnoozeCallOptions + + // The gRPC API client. + snoozeClient monitoringpb.SnoozeServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewSnoozeClient creates a new snooze service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. 
+// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) { + clientOpts := defaultSnoozeGRPCClientOptions() + if newSnoozeClientHook != nil { + hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()} + + c := &snoozeGRPCClient{ + connPool: connPool, + snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *snoozeGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.CreateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...) 
+ it := &SnoozeIterator{} + req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) { + resp := &monitoringpb.ListSnoozesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.ListSnoozes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSnoozes(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.GetSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.UpdateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go new file mode 100644 index 000000000..d38152513 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -0,0 +1,450 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
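The Snooze surface added above follows the usual GAPIC shape: a public SnoozeClient wrapper, an internal gRPC client, and iterators whose InternalFetch closures handle page tokens. Below is a minimal caller-side sketch (not taken from the vendored sources), assuming a placeholder project ID and the constructor shown above; the UptimeCheckClient added next in this diff is used the same way.

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()

	// Dials the gRPC transport configured by defaultSnoozeGRPCClientOptions
	// (endpoint, scopes, per-method timeout/retry settings).
	client, err := monitoring.NewSnoozeClient(ctx)
	if err != nil {
		log.Fatalf("NewSnoozeClient: %v", err)
	}
	defer client.Close()

	// ListSnoozes returns a SnoozeIterator; the generated InternalFetch
	// closure manages page tokens, so callers just loop until iterator.Done.
	it := client.ListSnoozes(ctx, &monitoringpb.ListSnoozesRequest{
		Parent: "projects/my-project", // placeholder project ID
	})
	for {
		snooze, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("ListSnoozes: %v", err)
		}
		fmt.Println(snooze.GetName(), snooze.GetDisplayName())
	}
}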
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newUptimeCheckClientHook clientHook + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. +type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListUptimeCheckIps: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalUptimeCheckClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator + GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error + ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator +} + +// UptimeCheckClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +type UptimeCheckClient struct { + // The internal transport-dependent client. + internalClient internalUptimeCheckClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project +// (leaving out any invalid configurations). +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...) +} + +// GetUptimeCheckConfig gets a single Uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...) 
+} + +// CreateUptimeCheckConfig creates a new Uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...) +} + +// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via updateMask. +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...) +} + +// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail +// if the Uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...) +} + +// ListUptimeCheckIps returns the list of IP addresses that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + return c.internalClient.ListUptimeCheckIps(ctx, req, opts...) +} + +// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type uptimeCheckGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing UptimeCheckClient + CallOptions **UptimeCheckCallOptions + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewUptimeCheckClient creates a new uptime check service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + clientOpts := defaultUptimeCheckGRPCClientOptions() + if newUptimeCheckClientHook != nil { + hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) 
+ } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()} + + c := &uptimeCheckGRPCClient{ + connPool: connPool, + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *uptimeCheckGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + resp := &monitoringpb.ListUptimeCheckConfigsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) 
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...) 
+ it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + resp := &monitoringpb.ListUptimeCheckIpsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go new file mode 100644 index 000000000..accff0f5e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package monitoring + +import "cloud.google.com/go/monitoring/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go new file mode 100644 index 000000000..670f0797e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "1.21.1" diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 1e924a834..73021df53 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -27,6 +27,9 @@ "apigeeregistry": { "component": "apigeeregistry" }, + "apihub": { + "component": "apihub" + }, "apikeys": { "component": "apikeys" }, diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index e9fb55585..7b1b06336 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,83 @@ # Changes +## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.45.0...storage/v1.46.0) (2024-10-31) + +### Features + +* **storage:** Add grpc metrics experimental options ([#10984](https://github.com/googleapis/google-cloud-go/issues/10984)) ([5b7397b](https://github.com/googleapis/google-cloud-go/commit/5b7397b169176f030049e1511859a883422c774e)) + + +### Bug Fixes + +* **storage:** Skip only specific transport tests. ([#11016](https://github.com/googleapis/google-cloud-go/issues/11016)) ([d40fbff](https://github.com/googleapis/google-cloud-go/commit/d40fbff9c1984aeed0224a4ac93eb95c5af17126)) +* **storage:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) +* **storage:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([2b8ca4b](https://github.com/googleapis/google-cloud-go/commit/2b8ca4b4127ce3025c7a21cc7247510e07cc5625)) + + +### Miscellaneous Chores + +* **storage/internal:** Remove notification, service account, and hmac RPCS. These API have been migrated to Storage Control and are available via the JSON API. 
([#11008](https://github.com/googleapis/google-cloud-go/issues/11008)) ([e0759f4](https://github.com/googleapis/google-cloud-go/commit/e0759f46639b4c542e5b49e4dc81340d8e123370)) + +## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.44.0...storage/v1.45.0) (2024-10-17) + + +### Features + +* **storage/internal:** Adds support for restore token ([70d82fe](https://github.com/googleapis/google-cloud-go/commit/70d82fe93f60f1075298a077ce1616f9ae7e13fe)) +* **storage:** Adding bucket-specific dynamicDelay ([#10987](https://github.com/googleapis/google-cloud-go/issues/10987)) ([a807a7e](https://github.com/googleapis/google-cloud-go/commit/a807a7e7f9fb002374407622c126102c5e61af82)) +* **storage:** Dynamic read request stall timeout ([#10958](https://github.com/googleapis/google-cloud-go/issues/10958)) ([a09f00e](https://github.com/googleapis/google-cloud-go/commit/a09f00eeecac82af98ae769bab284ee58a3a66cb)) + + +### Documentation + +* **storage:** Remove preview wording from NewGRPCClient ([#11002](https://github.com/googleapis/google-cloud-go/issues/11002)) ([40c3a5b](https://github.com/googleapis/google-cloud-go/commit/40c3a5b9c4cd4db2f1695e180419197b6a03ed7f)) + +## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03) + + +### Features + +* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f)) +* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2)) +* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f)) +* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) +* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545)) +* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8)) +* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb)) +* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88)) +* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b)) +* **storage:** Add support for Go 1.23 iterators 
([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) +* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361) +* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34)) +* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8)) +* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62)) + + +### Bug Fixes + +* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c)) +* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567) +* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978)) +* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) +* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d)) +* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0)) +* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2)) +* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + + +### Performance Improvements + +* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d)) + + +### Documentation + +* **storage/internal:** Clarify possible objectAccessControl roles 
([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) +* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) + ## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index d582a60d0..3eded0178 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -416,6 +416,10 @@ type BucketAttrs struct { // This field is read-only. Created time.Time + // Updated is the time at which the bucket was last modified. + // This field is read-only. + Updated time.Time + // VersioningEnabled reports whether this bucket has versioning enabled. VersioningEnabled bool @@ -824,6 +828,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { DefaultEventBasedHold: b.DefaultEventBasedHold, StorageClass: b.StorageClass, Created: convertTime(b.TimeCreated), + Updated: convertTime(b.Updated), VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, ACL: toBucketACLRules(b.Acl), DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), @@ -861,6 +866,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { DefaultEventBasedHold: b.GetDefaultEventBasedHold(), StorageClass: b.GetStorageClass(), Created: b.GetCreateTime().AsTime(), + Updated: b.GetUpdateTime().AsTime(), VersioningEnabled: b.GetVersioning().GetEnabled(), ACL: toBucketACLRulesFromProto(b.GetAcl()), DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index bbe89276a..aebba2251 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -122,7 +122,7 @@ type settings struct { gax []gax.CallOption // idempotent indicates if the call is idempotent or not when considering - // if the call should be retired or not. + // if the call should be retried or not. idempotent bool // clientOption is a set of option.ClientOption to be used during client @@ -132,6 +132,8 @@ type settings struct { // userProject is the user project that should be billed for the request. userProject string + + metricsContext *metricsContext } func initSettings(opts ...storageOption) *settings { diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index c274c762e..4fcfb7326 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -331,14 +331,14 @@ to add a [custom audit logging] header: // Use client as usual with the context and the additional headers will be sent. client.Bucket("my-bucket").Attrs(ctx) -# Experimental gRPC API +# gRPC API -This package includes support for the Cloud Storage gRPC API, which is currently -in preview. This implementation uses gRPC rather than the current JSON & XML -APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC -team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to -allowlist to access this API. The Go Storage gRPC library is not yet generally -available, so it may be subject to breaking changes. +This package includes support for the Cloud Storage gRPC API. 
The +implementation uses gRPC rather than the default +JSON & XML APIs to make requests to Cloud Storage. +The Go Storage gRPC client is generally available. +The Notifications, Service Account HMAC +and GetServiceAccount RPCs are not supported through the gRPC client. To create a client which will use gRPC, use the alternate constructor: @@ -349,15 +349,43 @@ To create a client which will use gRPC, use the alternate constructor: } // Use client as usual. -If the application is running within GCP, users may get better performance by -enabling Direct Google Access (enabling requests to skip some proxy steps). To enable, -set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add -the following side-effect imports to your application: +Using the gRPC API inside GCP with a bucket in the same region can allow for +[Direct Connectivity] (enabling requests to skip some proxy steps and reducing +response latency). A warning is emitted if gRPC is not used within GCP to +warn that Direct Connectivity could not be initialized. Direct Connectivity +is not required to access the gRPC API. - import ( - _ "google.golang.org/grpc/balancer/rls" - _ "google.golang.org/grpc/xds/googledirectpath" - ) +Dependencies for the gRPC API may slightly increase the size of binaries for +applications depending on this package. If you are not using gRPC, you can use +the build tag `disable_grpc_modules` to opt out of these dependencies and +reduce the binary size. + +The gRPC client emits metrics by default and will export the +gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to +[Google Cloud Monitoring]. The metrics are accessible through the Cloud Monitoring +API and you incur no additional cost for publishing the metrics. Google Cloud +Support can use this information to more quickly diagnose problems related to +GCS and gRPC. +Sending this data does not incur any billing charges, and requires minimal +CPU (a single RPC every minute) or memory (a few KiB to batch the +telemetry). + +To access the metrics you can view them through the Cloud Monitoring +[metric explorer] with the prefix `storage.googleapis.com/client`. Metrics are emitted +every minute. + +You can disable metrics when creating a new gRPC +client by passing [WithDisabledClientMetrics], as shown in the sketch below. + +The metrics exporter uses the Cloud Monitoring API, which determines the +project ID and credentials as follows: + +* The project ID is determined using the OTel Resource Detector for the environment; +otherwise it falls back to the project provided by [google.FindCredentials]. + +* Credentials are determined using [Application Default Credentials]. The +principal must have the `roles/monitoring.metricWriter` role granted. If not, a +warning will be logged. Subsequent warnings are silenced to prevent noisy logs. # Storage Control API @@ -366,6 +394,11 @@ and Managed Folder operations) are supported via the autogenerated Storage Contr client, which is available as a subpackage in this module. See package docs at [cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
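For the metric-disabling note above, here is a short sketch of the constructor call. The bucket name is a placeholder; the sketch also reads back the BucketAttrs.Updated field introduced earlier in this diff.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// NewGRPCClient uses the gRPC API described above; WithDisabledClientMetrics
	// opts out of the default client-side metric export to Cloud Monitoring.
	client, err := storage.NewGRPCClient(ctx, storage.WithDisabledClientMetrics())
	if err != nil {
		log.Fatalf("storage.NewGRPCClient: %v", err)
	}
	defer client.Close()

	// Use the client as usual; Updated is the bucket attribute added in this release.
	attrs, err := client.Bucket("my-bucket").Attrs(ctx) // placeholder bucket name
	if err != nil {
		log.Fatalf("Attrs: %v", err)
	}
	log.Printf("bucket %s last modified %s", attrs.Name, attrs.Updated)
}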
+[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials +[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials +[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md +[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md +[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -375,5 +408,7 @@ client, which is available as a subpackage in this module. See package docs at [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview [custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata [Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 +[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer +[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go new file mode 100644 index 000000000..5944f515d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go @@ -0,0 +1,237 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "math" + "sync" + "time" +) + +// dynamicDelay dynamically calculates the delay at a fixed percentile, based on +// delay samples. +// +// dynamicDelay is goroutine-safe. +type dynamicDelay struct { + increaseFactor float64 + decreaseFactor float64 + minDelay time.Duration + maxDelay time.Duration + value time.Duration + + // Guards the value + mu *sync.RWMutex +} + +// validateDynamicDelayParams ensures, +// targetPercentile is a valid fraction (between 0 and 1). +// increaseRate is a positive number. +// minDelay is less than maxDelay. +func validateDynamicDelayParams(targetPercentile, increaseRate float64, minDelay, maxDelay time.Duration) error { + if targetPercentile < 0 || targetPercentile > 1 { + return fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile) + } + if increaseRate <= 0 { + return fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate) + } + if minDelay >= maxDelay { + return fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay) + } + return nil +} + +// NewDynamicDelay returns a dynamicDelay. +// +// targetPercentile is the desired percentile to be computed. 
For example, a +// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be +// in the range [0, 1]. +// +// increaseRate (must be > 0) determines how many increase calls it takes for +// Value to double. +// +// initialDelay is the start value of the delay. +// +// decrease can never lower the delay past minDelay, increase can never raise +// the delay past maxDelay. +func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) *dynamicDelay { + if initialDelay < minDelay { + initialDelay = minDelay + } + if initialDelay > maxDelay { + initialDelay = maxDelay + } + + // Compute increaseFactor and decreaseFactor such that: + // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1 + increaseFactor := math.Exp(math.Log(2) / increaseRate) + if increaseFactor < 1.001 { + increaseFactor = 1.001 + } + decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile) + if decreaseFactor > 0.9999 { + decreaseFactor = 0.9999 + } + + return &dynamicDelay{ + increaseFactor: increaseFactor, + decreaseFactor: decreaseFactor, + minDelay: minDelay, + maxDelay: maxDelay, + value: initialDelay, + mu: &sync.RWMutex{}, + } +} + +func (d *dynamicDelay) unsafeIncrease() { + v := time.Duration(float64(d.value) * d.increaseFactor) + if v > d.maxDelay { + d.value = d.maxDelay + } else { + d.value = v + } +} + +// increase notes that the operation took longer than the delay returned by Value. +func (d *dynamicDelay) increase() { + d.mu.Lock() + defer d.mu.Unlock() + + d.unsafeIncrease() +} + +func (d *dynamicDelay) unsafeDecrease() { + v := time.Duration(float64(d.value) * d.decreaseFactor) + if v < d.minDelay { + d.value = d.minDelay + } else { + d.value = v + } +} + +// decrease notes that the operation completed before the delay returned by getValue. +func (d *dynamicDelay) decrease() { + d.mu.Lock() + defer d.mu.Unlock() + + d.unsafeDecrease() +} + +// update updates the delay value depending on the specified latency. +func (d *dynamicDelay) update(latency time.Duration) { + d.mu.Lock() + defer d.mu.Unlock() + + if latency > d.value { + d.unsafeIncrease() + } else { + d.unsafeDecrease() + } +} + +// getValue returns the desired delay to wait before retry the operation. +func (d *dynamicDelay) getValue() time.Duration { + d.mu.RLock() + defer d.mu.RUnlock() + + return d.value +} + +// printDelay prints the state of delay, helpful in debugging. +func (d *dynamicDelay) printDelay() { + d.mu.RLock() + defer d.mu.RUnlock() + + fmt.Println("IncreaseFactor: ", d.increaseFactor) + fmt.Println("DecreaseFactor: ", d.decreaseFactor) + fmt.Println("MinDelay: ", d.minDelay) + fmt.Println("MaxDelay: ", d.maxDelay) + fmt.Println("Value: ", d.value) +} + +// bucketDelayManager wraps dynamicDelay to provide bucket-specific delays. +type bucketDelayManager struct { + targetPercentile float64 + increaseRate float64 + initialDelay time.Duration + minDelay time.Duration + maxDelay time.Duration + + // delays maps bucket names to their dynamic delay instance. + delays map[string]*dynamicDelay + + // mu guards delays. + mu *sync.RWMutex +} + +// newBucketDelayManager returns a new bucketDelayManager instance. 
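For illustration, a minimal sketch of how the dynamic delay above adapts as latencies are observed; it uses only the unexported helpers defined in this file, and the parameter values are arbitrary:

	// Target the 99th percentile; roughly 15 consecutive increase calls
	// double the delay, which is clamped to the [500ms, 2s] range.
	d := newDynamicDelay(0.99, 15, 500*time.Millisecond, 500*time.Millisecond, 2*time.Second)

	d.update(900 * time.Millisecond) // slower than the current value: delay grows
	d.update(100 * time.Millisecond) // faster than the current value: delay shrinks
	timeout := d.getValue()          // delay to apply before the next attempt
	_ = timeout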
+func newBucketDelayManager(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*bucketDelayManager, error) { + err := validateDynamicDelayParams(targetPercentile, increaseRate, minDelay, maxDelay) + if err != nil { + return nil, err + } + + return &bucketDelayManager{ + targetPercentile: targetPercentile, + increaseRate: increaseRate, + initialDelay: initialDelay, + minDelay: minDelay, + maxDelay: maxDelay, + delays: make(map[string]*dynamicDelay), + mu: &sync.RWMutex{}, + }, nil +} + +// getDelay retrieves the dynamicDelay instance for the given bucket name. If no delay +// exists for the bucket, a new one is created with the configured parameters. +func (b *bucketDelayManager) getDelay(bucketName string) *dynamicDelay { + b.mu.RLock() + delay, ok := b.delays[bucketName] + b.mu.RUnlock() + + if !ok { + b.mu.Lock() + defer b.mu.Unlock() + + // Check again, as someone might create b/w the execution of mu.RUnlock() and mu.Lock(). + delay, ok = b.delays[bucketName] + if !ok { + // Create a new dynamicDelay for the bucket if it doesn't exist + delay = newDynamicDelay(b.targetPercentile, b.increaseRate, b.initialDelay, b.minDelay, b.maxDelay) + b.delays[bucketName] = delay + } + } + return delay +} + +// increase notes that the operation took longer than the delay for the given bucket. +func (b *bucketDelayManager) increase(bucketName string) { + b.getDelay(bucketName).increase() +} + +// decrease notes that the operation completed before the delay for the given bucket. +func (b *bucketDelayManager) decrease(bucketName string) { + b.getDelay(bucketName).decrease() +} + +// update updates the delay value for the bucket depending on the specified latency. +func (b *bucketDelayManager) update(bucketName string, latency time.Duration) { + b.getDelay(bucketName).update(latency) +} + +// getValue returns the desired delay to wait before retrying the operation for the given bucket. +func (b *bucketDelayManager) getValue(bucketName string) time.Duration { + return b.getDelay(bucketName).getValue() +} diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go new file mode 100644 index 000000000..b35de64d3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go @@ -0,0 +1,75 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package experimental is a collection of experimental features that might +// have some rough edges to them. Housing experimental features in this package +// results in a user accessing these APIs as `experimental.Foo`, thereby making +// it explicit that the feature is experimental and using them in production +// code is at their own risk. +// +// All APIs in this package are experimental. 
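For illustration, a minimal sketch of opting in to the dynamic read-stall timeout documented below, assuming the HTTP-based storage.NewClient since that option currently applies to the XML API:

	client, err := storage.NewClient(ctx,
		experimental.WithReadStallTimeout(&experimental.ReadStallTimeoutConfig{
			Min:              2 * time.Second,
			TargetPercentile: 0.99,
		}))
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()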
+package experimental + +import ( + "time" + + "cloud.google.com/go/storage/internal" + "go.opentelemetry.io/otel/sdk/metric" + "google.golang.org/api/option" +) + +// WithMetricInterval provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient]. +// It sets how often to emit metrics [metric.WithInterval] when using +// [metric.NewPeriodicReader] +// When using Cloud Monitoring interval must be at minimum 1 [time.Minute]. +func WithMetricInterval(metricInterval time.Duration) option.ClientOption { + return internal.WithMetricInterval.(func(time.Duration) option.ClientOption)(metricInterval) +} + +// WithMetricExporter provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient]. +// Set an alternate client-side metric Exporter to emit metrics through. +// Must implement [metric.Exporter] +func WithMetricExporter(ex *metric.Exporter) option.ClientOption { + return internal.WithMetricExporter.(func(*metric.Exporter) option.ClientOption)(ex) +} + +// WithReadStallTimeout provides a [option.ClientOption] that may be passed to [storage.NewClient]. +// It enables the client to retry stalled requests when starting a download from +// Cloud Storage. If the timeout elapses with no response from the server, the request +// is automatically retried. +// The timeout is initially set to ReadStallTimeoutConfig.Min. The client tracks +// latency across all read requests from the client for each bucket accessed, and can +// adjust the timeout higher to the target percentile when latency for request to that +// bucket is high. +// Currently, this is supported only for downloads ([storage.NewReader] and +// [storage.NewRangeReader] calls) and only for the XML API. Other read APIs (gRPC & JSON) +// will be supported soon. +func WithReadStallTimeout(rstc *ReadStallTimeoutConfig) option.ClientOption { + return internal.WithReadStallTimeout.(func(config *ReadStallTimeoutConfig) option.ClientOption)(rstc) +} + +// ReadStallTimeoutConfig defines the timeout which is adjusted dynamically based on +// past observed latencies. +type ReadStallTimeoutConfig struct { + // Min is the minimum duration of the timeout. The default value is 500ms. Requests + // taking shorter than this value to return response headers will never time out. + // In general, you should choose a Min value that is greater than the typical value + // for the target percentile. + Min time.Duration + + // TargetPercentile is the percentile to target for the dynamic timeout. The default + // value is 0.99. At the default percentile, at most 1% of requests will be timed out + // and retried. 
+ TargetPercentile float64 +} diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index d81a17b6b..407892705 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -16,11 +16,12 @@ package storage import ( "context" - "encoding/base64" + "encoding/binary" "errors" "fmt" "hash/crc32" "io" + "log" "net/url" "os" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protowire" @@ -95,10 +97,11 @@ func defaultGRPCOptions() []option.ClientOption { option.WithEndpoint(host), option.WithGRPCDialOption(grpc.WithInsecure()), option.WithoutAuthentication(), + WithDisabledClientMetrics(), ) } else { // Only enable DirectPath when the emulator is not being targeted. - defaults = append(defaults, internaloption.EnableDirectPath(true)) + defaults = append(defaults, internaloption.EnableDirectPath(true), internaloption.EnableDirectPathXds()) } return defaults @@ -124,6 +127,15 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") } + if !config.disableClientMetrics { + // Do not fail client creation if enabling metrics fails. + if metricsContext, err := enableClientMetrics(ctx, s, config); err == nil { + s.metricsContext = metricsContext + s.clientOption = append(s.clientOption, metricsContext.clientOpts...) + } else { + log.Printf("Failed to enable client metrics: %v", err) + } + } g, err := gapic.NewClient(ctx, s.clientOption...) if err != nil { return nil, err @@ -136,26 +148,17 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl } func (c *grpcStorageClient) Close() error { + if c.settings.metricsContext != nil { + c.settings.metricsContext.close() + } return c.raw.Close() } // Top-level methods. +// GetServiceAccount is not supported in the gRPC client. func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetServiceAccountRequest{ - Project: toProjectResource(project), - } - var resp *storagepb.ServiceAccount - err := run(ctx, func(ctx context.Context) error { - var err error - resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - return resp.EmailAddress, err + return "", errMethodNotSupported } func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { @@ -432,16 +435,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask SoftDeleted: it.query.SoftDeleted, + IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes, } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } fetch := func(pageSize int, pageToken string) (token string, err error) { - // IncludeFoldersAsPrefixes is not supported for gRPC - // TODO: remove this when support is added in the proto. 
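With this change, the gRPC ListObjects path forwards Query.IncludeFoldersAsPrefixes instead of returning Unimplemented. A minimal listing sketch; the bucket name is a placeholder, and the client may be either HTTP- or gRPC-based:

	q := &storage.Query{
		Delimiter:                "/",
		IncludeFoldersAsPrefixes: true,
	}
	it := client.Bucket("example-bucket").Objects(ctx, q)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
			break
		}
		if attrs.Prefix != "" {
			// Prefix entries (including folders surfaced as prefixes) land here.
		}
	}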
- if it.query.IncludeFoldersAsPrefixes { - return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") - } var objects []*storagepb.Object var gitr *gapic.ObjectIterator err = run(it.ctx, func(ctx context.Context) error { @@ -959,37 +958,48 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } -// bytesCodec is a grpc codec which permits receiving messages as either -// protobuf messages, or as raw []bytes. -type bytesCodec struct { - encoding.Codec +// Custom codec to be used for unmarshaling ReadObjectResponse messages. +// This is used to avoid a copy of object data in proto.Unmarshal. +type bytesCodecV2 struct { } -func (bytesCodec) Marshal(v any) ([]byte, error) { +var _ encoding.CodecV2 = bytesCodecV2{} + +// Marshal is used to encode messages to send for bytesCodecV2. Since we are only +// using this to send ReadObjectRequest messages we don't need to recycle buffers +// here. +func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + var data mem.BufferSlice + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + return data, nil } -func (bytesCodec) Unmarshal(data []byte, v any) error { +// Unmarshal is used for data received for ReadObjectResponse. We want to preserve +// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal. +func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error { switch v := v.(type) { - case *[]byte: - // If gRPC could recycle the data []byte after unmarshaling (through - // buffer pools), we would need to make a copy here. + case *mem.BufferSlice: *v = data + // Pick up a reference to the data so that it is not freed while decoding. + data.Ref() return nil case proto.Message: - return proto.Unmarshal(data, v) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + return proto.Unmarshal(buf.ReadOnlyData(), v) default: - return fmt.Errorf("can not unmarshal type %T", v) + return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v) } } -func (bytesCodec) Name() string { - // If this isn't "", then gRPC sets the content-subtype of the call to this - // value and we get errors. +func (bytesCodecV2) Name() string { return "" } @@ -1000,7 +1010,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange s := callSettings(c.settings, opts...) s.gax = append(s.gax, gax.WithGRPCOptions( - grpc.ForceCodec(bytesCodec{}), + grpc.ForceCodecV2(bytesCodecV2{}), )) if s.userProject != "" { @@ -1018,8 +1028,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.Generation = params.gen } - var databuf []byte - // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { @@ -1045,18 +1053,19 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } var stream storagepb.Storage_ReadObjectClient - var msg *storagepb.ReadObjectResponse var err error + var decoder *readResponseDecoder err = run(cc, func(ctx context.Context) error { - stream, err = c.raw.ReadObject(cc, req, s.gax...) + stream, err = c.raw.ReadObject(ctx, req, s.gax...) 
if err != nil { return err } // Receive the message into databuf as a wire-encoded message so we can // use a custom decoder to avoid an extra copy at the protobuf layer. - err := stream.RecvMsg(&databuf) + databufs := mem.BufferSlice{} + err := stream.RecvMsg(&databufs) // These types of errors show up on the Recv call, rather than the // initialization of the stream via ReadObject above. if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { @@ -1066,22 +1075,26 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return err } // Use a custom decoder that uses protobuf unmarshalling for all - // fields except the checksummed data. - // Subsequent receives in Read calls will skip all protobuf - // unmarshalling and directly read the content from the gRPC []byte - // response, since only the first call will contain other fields. - msg, err = readFullObjectResponse(databuf) - + // fields except the object data. Object data is handled separately + // to avoid a copy. + decoder = &readResponseDecoder{ + databufs: databufs, + } + err = decoder.readFullObjectResponse() return err }, s.retry, s.idempotent) if err != nil { // Close the stream context we just created to ensure we don't leak // resources. cancel() + // Free any buffers. + if decoder != nil && decoder.databufs != nil { + decoder.databufs.Free() + } return nil, nil, err } - return &readStreamResponse{stream, msg}, cancel, nil + return &readStreamResponse{stream, decoder}, cancel, nil } res, cancel, err := reopen(0) @@ -1091,7 +1104,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // The first message was Recv'd on stream open, use it to populate the // object metadata. - msg := res.response + msg := res.decoder.msg obj := msg.GetMetadata() // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() @@ -1101,9 +1114,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange wantCRC uint32 checkCRC bool ) - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil { + if params.offset == 0 && params.length < 0 { + checkCRC = true + } wantCRC = checksums.GetCrc32C() - checkCRC = true } r = &Reader{ @@ -1115,18 +1130,17 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange LastModified: obj.GetUpdateTime().AsTime(), Metageneration: obj.GetMetageneration(), Generation: obj.GetGeneration(), + CRC32C: wantCRC, }, reader: &gRPCReader{ stream: res.stream, reopen: reopen, cancel: cancel, size: size, - // Store the content from the first Recv in the - // client buffer for reading later. - leftovers: msg.GetChecksummedData().GetContent(), + // Preserve the decoder to read out object data when Read/WriteTo is called. + currMsg: res.decoder, settings: s, zeroRange: params.length == 0, - databuf: databuf, wantCRC: wantCRC, checkCRC: checkCRC, }, @@ -1293,213 +1307,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str return res.Permissions, nil } -// HMAC Key methods. +// HMAC Key methods are not implemented in gRPC client. func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) 
- req := &storagepb.GetHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - req := &storagepb.ListHmacKeysRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - ShowDeletedKeys: showDeletedKeys, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } it := &HMACKeysIterator{ ctx: ctx, - projectID: project, - retry: s.retry, + projectID: "", + retry: nil, } - fetch := func(pageSize int, pageToken string) (token string, err error) { - var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func(ctx context.Context) error { - gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) - hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - for _, hkmd := range hmacKeys { - hk := toHMACKeyFromProto(hkmd) - it.hmacKeys = append(it.hmacKeys, hk) - } - - return token, nil + fetch := func(_ int, _ string) (token string, err error) { + return "", errMethodNotSupported } it.pageInfo, it.nextFunc = iterator.NewPageInfo( fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) + func() int { return 0 }, + func() interface{} { return nil }, + ) return it } func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - hk := &storagepb.HmacKeyMetadata{ - AccessId: accessID, - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - State: string(attrs.State), - Etag: attrs.Etag, - } - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if attrs.State != "" { - fieldMask.Paths = append(fieldMask.Paths, "state") - } - req := &storagepb.UpdateHmacKeyRequest{ - HmacKey: hk, - UpdateMask: fieldMask, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) 
- req := &storagepb.CreateHmacKeyRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - key := toHMACKeyFromProto(res.Metadata) - key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) - - return key, nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent) + return errMethodNotSupported } -// Notification methods. +// Notification methods are not implemented in gRPC client. func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - req := &storagepb.ListNotificationConfigsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - } - var notifications []*storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) - for { - // PageSize is not set and fallbacks to the API default pageSize of 100. - items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) - if err != nil { - return err - } - notifications = append(notifications, items...) - // If there are no more results, nextPageToken is empty and err is nil. - if nextPageToken == "" { - return err - } - req.PageToken = nextPageToken - } - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - - return notificationsToMapFromProto(notifications), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.CreateNotificationConfigRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - NotificationConfig: toProtoNotification(n), - } - var pbn *storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - var err error - pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) 
- return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toNotificationFromProto(pbn), err + return nil, errMethodNotSupported } func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationConfigRequest{Name: id} - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) - }, s.retry, s.idempotent) + return errMethodNotSupported } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -1512,8 +1366,8 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context } type readStreamResponse struct { - stream storagepb.Storage_ReadObjectClient - response *storagepb.ReadObjectResponse + stream storagepb.Storage_ReadObjectClient + decoder *readResponseDecoder } type gRPCReader struct { @@ -1522,7 +1376,7 @@ type gRPCReader struct { stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte - databuf []byte + currMsg *readResponseDecoder // decoder for the current message cancel context.CancelFunc settings *settings checkCRC bool // should we check the CRC? @@ -1565,18 +1419,21 @@ func (r *gRPCReader) Read(p []byte) (int, error) { } var n int - // Read leftovers and return what was available to conform to the Reader + + // If there is data remaining in the current message, return what was + // available to conform to the Reader // interface: https://pkg.go.dev/io#Reader. - if len(r.leftovers) > 0 { - n = copy(p, r.leftovers) + if !r.currMsg.done { + n = r.currMsg.readAndUpdateCRC(p, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(n) - r.updateCRC(p[:n]) - r.leftovers = r.leftovers[n:] return n, nil } // Attempt to Recv the next message on the stream. - content, err := r.recv() + // This will update r.currMsg with the decoder for the new message. + err := r.recv() if err != nil { return 0, err } @@ -1588,16 +1445,11 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - n = copy(p[n:], content) - leftover := len(content) - n - if leftover > 0 { - // Wasn't able to copy all of the data in the message, store for - // future Read calls. - r.leftovers = content[n:] - } - r.seen += int64(n) - r.updateCRC(p[:n]) + n = r.currMsg.readAndUpdateCRC(p, func(b []byte) { + r.updateCRC(b) + }) + r.seen += int64(n) return n, nil } @@ -1624,14 +1476,14 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // Track bytes written during before call. var alreadySeen = r.seen - // Write any leftovers to the stream. There will be some leftovers from the + // Write any already received message to the stream. There will be some leftovers from the // original NewRangeReader call. - if len(r.leftovers) > 0 { - // Write() will write the entire leftovers slice unless there is an error. 
- written, err := w.Write(r.leftovers) + if r.currMsg != nil && !r.currMsg.done { + written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(written) - r.updateCRC(r.leftovers) - r.leftovers = nil + r.currMsg = nil if err != nil { return r.seen - alreadySeen, err } @@ -1642,7 +1494,7 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // Attempt to receive the next message on the stream. // Will terminate with io.EOF once data has all come through. // recv() handles stream reopening and retry logic so no need for retries here. - msg, err := r.recv() + err := r.recv() if err != nil { if err == io.EOF { // We are done; check the checksum if necessary and return. @@ -1658,9 +1510,10 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - written, err := w.Write(msg) + written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(written) - r.updateCRC(msg) if err != nil { return r.seen - alreadySeen, err } @@ -1669,12 +1522,13 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { } // Close cancels the read stream's context in order for it to be closed and -// collected. +// collected, and frees any currently in use buffers. func (r *gRPCReader) Close() error { if r.cancel != nil { r.cancel() } r.stream = nil + r.currMsg = nil return nil } @@ -1689,8 +1543,9 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. -func (r *gRPCReader) recv() ([]byte, error) { - err := r.stream.RecvMsg(&r.databuf) +func (r *gRPCReader) recv() error { + databufs := mem.BufferSlice{} + err := r.stream.RecvMsg(&databufs) var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { @@ -1700,16 +1555,16 @@ func (r *gRPCReader) recv() ([]byte, error) { // This will "close" the existing stream and immediately attempt to // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is - // successful, the next logical chunk will be returned. - msg, err := r.reopenStream() - return msg.GetChecksummedData().GetContent(), err + // successful, r.currMsg will be updated to include the new data. + return r.reopenStream() } if err != nil { - return nil, err + return err } - return readObjectResponseContent(r.databuf) + r.currMsg = &readResponseDecoder{databufs: databufs} + return r.currMsg.readFullObjectResponse() } // ReadObjectResponse field and subfield numbers. @@ -1722,21 +1577,297 @@ const ( metadataField = protowire.Number(4) ) -// readObjectResponseContent returns the checksummed_data.content field of a -// ReadObjectResponse message, or an error if the message is invalid. -// This can be used on recvs of objects after the first recv, since only the -// first message will contain non-data fields. -func readObjectResponseContent(b []byte) ([]byte, error) { - checksummedData, err := readProtoBytes(b, checksummedDataField) +// readResponseDecoder is a wrapper on the raw message, used to decode one message +// without copying object data. It also has methods to write out the resulting object +// data to the user application. 
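The decoder below walks the raw wire format with the protowire package. As background, a hypothetical, self-contained helper showing the same consume-tag-then-payload pattern for a single bytes field (assumes an import of google.golang.org/protobuf/encoding/protowire):

	// parseField1 returns the payload of bytes field number 1, scanning
	// tag/value pairs and skipping any other fields.
	func parseField1(b []byte) ([]byte, error) {
		for len(b) > 0 {
			num, typ, n := protowire.ConsumeTag(b)
			if n < 0 {
				return nil, protowire.ParseError(n)
			}
			b = b[n:]
			if num == 1 && typ == protowire.BytesType {
				payload, m := protowire.ConsumeBytes(b)
				if m < 0 {
					return nil, protowire.ParseError(m)
				}
				return payload, nil
			}
			if n = protowire.ConsumeFieldValue(num, typ, b); n < 0 {
				return nil, protowire.ParseError(n)
			}
			b = b[n:]
		}
		return nil, nil
	}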
+type readResponseDecoder struct { + databufs mem.BufferSlice // raw bytes of the message being processed + // Decoding offsets + off uint64 // offset in the messsage relative to the data as a whole + currBuf int // index of the current buffer being processed + currOff uint64 // offset in the current buffer + // Processed data + msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated + dataOffsets bufferSliceOffsets // offsets of the object data in the message. + done bool // true if the data has been completely read. +} + +type bufferSliceOffsets struct { + startBuf, endBuf int // indices of start and end buffers of object data in the msg + startOff, endOff uint64 // offsets within these buffers where the data starts and ends. + currBuf int // index of current buffer being read out to the user application. + currOff uint64 // offset of read in current buffer. +} + +// peek ahead 10 bytes from the current offset in the databufs. This will return a +// slice of the current buffer if the bytes are all in one buffer, but will copy +// the bytes into a new buffer if the distance is split across buffers. Use this +// to allow protowire methods to be used to parse tags & fixed values. +// The max length of a varint tag is 10 bytes, see +// https://protobuf.dev/programming-guides/encoding/#varints . Other int types +// are shorter. +func (d *readResponseDecoder) peek() []byte { + b := d.databufs[d.currBuf].ReadOnlyData() + // Check if the tag will fit in the current buffer. If not, copy the next 10 + // bytes into a new buffer to ensure that we can read the tag correctly + // without it being divided between buffers. + tagBuf := b[d.currOff:] + remainingInBuf := len(tagBuf) + // If we have less than 10 bytes remaining and are not in the final buffer, + // copy up to 10 bytes ahead from the next buffer. + if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 { + tagBuf = d.copyNextBytes(10) + } + return tagBuf +} + +// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the +// buffers overall. Does not advance offsets. +func (d *readResponseDecoder) copyNextBytes(n int) []byte { + remaining := n + if r := d.databufs.Len() - int(d.off); r < remaining { + remaining = r + } + currBuf := d.currBuf + currOff := d.currOff + var buf []byte + for remaining > 0 { + b := d.databufs[currBuf].ReadOnlyData() + remainingInCurr := len(b[currOff:]) + if remainingInCurr < remaining { + buf = append(buf, b[currOff:]...) + remaining -= remainingInCurr + currBuf++ + currOff = 0 + } else { + buf = append(buf, b[currOff:currOff+uint64(remaining)]...) + remaining = 0 + } + } + return buf +} + +// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we +// go past the end of the data. +func (d *readResponseDecoder) advanceOffset(n uint64) error { + remaining := n + for remaining > 0 { + remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff + if remainingInCurr <= remaining { + remaining -= remainingInCurr + d.currBuf++ + d.currOff = 0 + } else { + d.currOff += remaining + remaining = 0 + } + } + // If we have advanced past the end of the buffers, something went wrong. + if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) { + return errors.New("decoding: truncated message, cannot advance offset") + } + d.off += n + return nil + +} + +// This copies object data from the message into the buffer and returns the number of +// bytes copied. 
The data offsets are incremented in the message. The updateCRC +// function is called on the copied bytes. +func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int { + // For a completely empty message, just return 0 + if len(d.databufs) == 0 { + return 0 + } + databuf := d.databufs[d.dataOffsets.currBuf] + startOff := d.dataOffsets.currOff + var b []byte + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff] + } else { + b = databuf.ReadOnlyData()[startOff:] + } + n := copy(p, b) + updateCRC(b[:n]) + d.dataOffsets.currOff += uint64(n) + + // We've read all the data from this message. Free the underlying buffers. + if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff { + d.done = true + d.databufs.Free() + } + // We are at the end of the current buffer + if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) { + d.dataOffsets.currOff = 0 + d.dataOffsets.currBuf++ + } + return n +} + +func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) { + // For a completely empty message, just return 0 + if len(d.databufs) == 0 { + return 0, nil + } + var written int64 + for !d.done { + databuf := d.databufs[d.dataOffsets.currBuf] + startOff := d.dataOffsets.currOff + var b []byte + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff] + } else { + b = databuf.ReadOnlyData()[startOff:] + } + var n int + // Write all remaining data from the current buffer + n, err := w.Write(b) + written += int64(n) + updateCRC(b) + if err != nil { + return written, err + } + d.dataOffsets.currOff = 0 + // We've read all the data from this message. + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + d.done = true + d.databufs.Free() + } else { + d.dataOffsets.currBuf++ + } + } + return written, nil +} + +// Consume the next available tag in the input data and return the field number and type. +// Advances the relevant offsets in the data. +func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) { + tagBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf) + if tagLength < 0 { + return 0, 0, protowire.ParseError(tagLength) + } + // Update the offsets and current buffer depending on the tag length. + if err := d.advanceOffset(uint64(tagLength)); err != nil { + return 0, 0, fmt.Errorf("consuming tag: %w", err) + } + return fieldNum, fieldType, nil +} + +// Consume a varint that represents the length of a bytes field. Return the length of +// the data, and advance the offsets by the length of the varint. +func (d *readResponseDecoder) consumeVarint() (uint64, error) { + tagBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + dataLength, tagLength := protowire.ConsumeVarint(tagBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. + d.advanceOffset(uint64(tagLength)) + return dataLength, nil +} + +func (d *readResponseDecoder) consumeFixed32() (uint32, error) { + valueBuf := d.peek() + + // Consume the next tag. 
This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + value, tagLength := protowire.ConsumeFixed32(valueBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. + d.advanceOffset(uint64(tagLength)) + return value, nil +} + +func (d *readResponseDecoder) consumeFixed64() (uint64, error) { + valueBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + value, tagLength := protowire.ConsumeFixed64(valueBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. + d.advanceOffset(uint64(tagLength)) + return value, nil +} + +// Consume any field values up to the end offset provided and don't return anything. +// This is used to skip any values which are not going to be used. +// msgEndOff is indexed in terms of the overall data across all buffers. +func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error { + // reimplement protowire.ConsumeFieldValue without the extra case for groups (which + // are are complicted and not a thing in proto3). + var err error + switch fieldType { + case protowire.VarintType: + _, err = d.consumeVarint() + case protowire.Fixed32Type: + _, err = d.consumeFixed32() + case protowire.Fixed64Type: + _, err = d.consumeFixed64() + case protowire.BytesType: + _, err = d.consumeBytes() + default: + return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum) + } if err != nil { - return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err) } - content, err := readProtoBytes(checksummedData, checksummedDataContentField) + + return nil +} + +// Consume a bytes field from the input. Returns offsets for the data in the buffer slices +// and an error. +func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) { + // m is the length of the data past the tag. + m, err := d.consumeVarint() if err != nil { - return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) + return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err) + } + offsets := bufferSliceOffsets{ + startBuf: d.currBuf, + startOff: d.currOff, + currBuf: d.currBuf, + currOff: d.currOff, } - return content, nil + // Advance offsets to lengths of bytes field and capture where we end. + d.advanceOffset(m) + offsets.endBuf = d.currBuf + offsets.endOff = d.currOff + return offsets, nil +} + +// Consume a bytes field from the input and copy into a new buffer if +// necessary (if the data is split across buffers in databuf). This can be +// used to leverage proto.Unmarshal for small bytes fields (i.e. anything +// except object data). +func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) { + // m is the length of the bytes data. 
+ m, err := d.consumeVarint() + if err != nil { + return nil, fmt.Errorf("consuming varint: %w", err) + } + // Copy the data into a buffer and advance the offset + b := d.copyNextBytes(int(m)) + if err := d.advanceOffset(m); err != nil { + return nil, fmt.Errorf("advancing offset: %w", err) + } + return b, nil } // readFullObjectResponse returns the ReadObjectResponse that is encoded in the @@ -1746,21 +1877,17 @@ func readObjectResponseContent(b []byte) ([]byte, error) { // This function is essentially identical to proto.Unmarshal, except it aliases // the data in the input []byte. If the proto library adds a feature to // Unmarshal that does that, this function can be dropped. -func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { +func (d *readResponseDecoder) readFullObjectResponse() error { msg := &storagepb.ReadObjectResponse{} // Loop over the entire message, extracting fields as we go. This does not // handle field concatenation, in which the contents of a single field // are split across multiple protobuf tags. - off := 0 - for off < len(b) { - // Consume the next tag. This will tell us which field is next in the - // buffer, its type, and how much space it takes up. - fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) - if fieldLength < 0 { - return nil, protowire.ParseError(fieldLength) + for d.off < uint64(d.databufs.Len()) { + fieldNum, fieldType, err := d.consumeTag() + if err != nil { + return fmt.Errorf("consuming next tag: %w", err) } - off += fieldLength // Unmarshal the field according to its type. Only fields that are not // nil will be present. @@ -1769,142 +1896,95 @@ func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { // The ChecksummedData field was found. Initialize the struct. msg.ChecksummedData = &storagepb.ChecksummedData{} - // Get the bytes corresponding to the checksummed data. - fieldContent, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + bytesFieldLen, err := d.consumeVarint() + if err != nil { + return fmt.Errorf("consuming bytes: %v", err) } - off += n - - // Get the nested fields. We need to do this manually as it contains - // the object content bytes. - contentOff := 0 - for contentOff < len(fieldContent) { - gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) - if n < 0 { - return nil, protowire.ParseError(n) + + var contentEndOff = d.off + bytesFieldLen + for d.off < contentEndOff { + gotNum, gotTyp, err := d.consumeTag() + if err != nil { + return fmt.Errorf("consuming checksummedData tag: %w", err) } - contentOff += n switch { case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: - // Get the content bytes. - bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + // Get the offsets of the content bytes. 
+ d.dataOffsets, err = d.consumeBytes() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err) } - msg.ChecksummedData.Content = bytes - contentOff += n case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + v, err := d.consumeFixed32() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err) } msg.ChecksummedData.Crc32C = &v - contentOff += n default: - n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) - if n < 0 { - return nil, protowire.ParseError(n) + err := d.consumeFieldValue(gotNum, gotTyp) + if err != nil { + return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err) } - contentOff += n } } case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: // The field was found. Initialize the struct. msg.ObjectChecksums = &storagepb.ObjectChecksums{} - - // Get the bytes corresponding to the checksums. - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + // Consume the bytes and copy them into a single buffer if they are split across buffers. + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err) } - off += n - // Unmarshal. - if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil { + return err } case fieldNum == contentRangeField && fieldType == protowire.BytesType: msg.ContentRange = &storagepb.ContentRange{} - - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n)) + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err) } - off += n - - if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.ContentRange); err != nil { + return err } case fieldNum == metadataField && fieldType == protowire.BytesType: msg.Metadata = &storagepb.Object{} - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n)) + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err) } - off += n - if err := proto.Unmarshal(bytes, msg.Metadata); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.Metadata); err != nil { + return err } default: - fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:]) - if fieldLength < 0 { - return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength)) - } - off += fieldLength - } - } - - return msg, nil -} - -// readProtoBytes returns the contents of the protobuf field with number num -// and type bytes from a wire-encoded message. If the field cannot be found, -// the returned slice will be nil and no error will be returned. -// -// It does not handle field concatenation, in which the contents of a single field -// are split across multiple protobuf tags. 
Encoded data containing split fields -// of this form is technically permissable, but uncommon. -func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) { - off := 0 - for off < len(b) { - gotNum, gotTyp, n := protowire.ConsumeTag(b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - off += n - if gotNum == num && gotTyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) + err := d.consumeFieldValue(fieldNum, fieldType) + if err != nil { + return fmt.Errorf("invalid field in ReadObjectResponse: %w", err) } - return b, nil } - n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - off += n } - return nil, nil + d.msg = msg + return nil } // reopenStream "closes" the existing stream and attempts to reopen a stream and // sets the Reader's stream and cancelStream properties in the process. -func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { +func (r *gRPCReader) reopenStream() error { // Close existing stream and initialize new stream with updated offset. r.Close() res, cancel, err := r.reopen(r.seen) if err != nil { - return nil, err + return err } r.stream = res.stream + r.currMsg = res.decoder r.cancel = cancel - return res.response, nil + return nil } func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go new file mode 100644 index 000000000..d34227334 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_dp.go @@ -0,0 +1,22 @@ +//go:build !disable_grpc_modules + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" +) diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go new file mode 100644 index 000000000..149b37807 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go @@ -0,0 +1,281 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric" + "github.com/google/uuid" + "go.opentelemetry.io/contrib/detectors/gcp" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + "google.golang.org/grpc/stats/opentelemetry" +) + +const ( + monitoredResourceName = "storage.googleapis.com/Client" + metricPrefix = "storage.googleapis.com/client/" +) + +func latencyHistogramBoundaries() []float64 { + boundaries := []float64{} + boundary := 0.0 + increment := 0.002 + // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range + for i := 0; i < 50; i++ { + boundaries = append(boundaries, boundary) + // increment by 2ms + boundary += increment + } + // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes + for i := 0; i < 150 && boundary < 300; i++ { + boundaries = append(boundaries, boundary) + if i != 0 && i%10 == 0 { + increment *= 2 + } + boundary += increment + } + return boundaries +} + +func sizeHistogramBoundaries() []float64 { + kb := 1024.0 + mb := 1024.0 * kb + gb := 1024.0 * mb + boundaries := []float64{} + boundary := 0.0 + increment := 128 * kb + // 128 KiB increments up to 4MiB, then exponential growth + for len(boundaries) < 200 && boundary <= 16*gb { + boundaries = append(boundaries, boundary) + boundary += increment + if boundary >= 4*mb { + increment *= 2 + } + } + return boundaries +} + +func metricFormatter(m metricdata.Metrics) string { + return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/") +} + +func gcpAttributeExpectedDefaults() []attribute.KeyValue { + return []attribute.KeyValue{ + {Key: "location", Value: attribute.StringValue("global")}, + {Key: "cloud_platform", Value: attribute.StringValue("unknown")}, + {Key: "host_id", Value: attribute.StringValue("unknown")}} +} + +// Added to help with tests +type preparedResource struct { + projectToUse string + resource *resource.Resource +} + +func newPreparedResource(ctx context.Context, project string, resourceOptions []resource.Option) (*preparedResource, error) { + detectedAttrs, err := resource.New(ctx, resourceOptions...) 
+ if err != nil { + return nil, err + } + preparedResource := &preparedResource{} + s := detectedAttrs.Set() + p, present := s.Value("cloud.account.id") + if present { + preparedResource.projectToUse = p.AsString() + } else { + preparedResource.projectToUse = project + } + updates := []attribute.KeyValue{} + for _, kv := range gcpAttributeExpectedDefaults() { + if val, present := s.Value(kv.Key); !present || val.AsString() == "" { + updates = append(updates, attribute.KeyValue{Key: kv.Key, Value: kv.Value}) + } + } + r, err := resource.New( + ctx, + resource.WithAttributes( + attribute.KeyValue{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)}, + attribute.KeyValue{Key: "instance_id", Value: attribute.StringValue(uuid.New().String())}, + attribute.KeyValue{Key: "project_id", Value: attribute.StringValue(project)}, + attribute.KeyValue{Key: "api", Value: attribute.StringValue("grpc")}, + ), + resource.WithAttributes(detectedAttrs.Attributes()...), + // Last duplicate key / value wins + resource.WithAttributes(updates...), + ) + if err != nil { + return nil, err + } + preparedResource.resource = r + return preparedResource, nil +} + +type metricsContext struct { + // client options passed to gRPC channels + clientOpts []option.ClientOption + // instance of metric reader used by gRPC client-side metrics + provider *metric.MeterProvider + // clean func to call when closing gRPC client + close func() +} + +func createHistogramView(name string, boundaries []float64) metric.View { + return metric.NewView(metric.Instrument{ + Name: name, + Kind: metric.InstrumentKindHistogram, + }, metric.Stream{ + Name: name, + Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries}, + }) +} + +func newGRPCMetricContext(ctx context.Context, project string, config storageConfig) (*metricsContext, error) { + var exporter metric.Exporter + meterOpts := []metric.Option{} + if config.metricExporter != nil { + exporter = *config.metricExporter + } else { + preparedResource, err := newPreparedResource(ctx, project, []resource.Option{resource.WithDetectors(gcp.NewDetector())}) + if err != nil { + return nil, err + } + meterOpts = append(meterOpts, metric.WithResource(preparedResource.resource)) + // Implementation requires a project, if one is not determined possibly user + // credentials. Then we will fail stating gRPC Metrics require a project-id. + if project == "" && preparedResource.projectToUse == "" { + return nil, fmt.Errorf("google cloud project is required to start client-side metrics") + } + // If projectTouse isn't the same as project provided to Storage client, then + // emit a log stating which project is being used to emit metrics to. + if project != preparedResource.projectToUse { + log.Printf("The Project ID configured for metrics is %s, but the Project ID of the storage client is %s. Make sure that the service account in use has the required metric writing role (roles/monitoring.metricWriter) in the project projectIdToUse or metrics will not be written.", preparedResource.projectToUse, project) + } + meOpts := []mexporter.Option{ + mexporter.WithProjectID(preparedResource.projectToUse), + mexporter.WithMetricDescriptorTypeFormatter(metricFormatter), + mexporter.WithCreateServiceTimeSeries(), + mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"})} + exporter, err = mexporter.New(meOpts...) 
+ if err != nil { + return nil, err + } + } + // Metric views update histogram boundaries to be relevant to GCS + // otherwise default OTel histogram boundaries are used. + metricViews := []metric.View{ + createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()), + createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()), + createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries()), + } + interval := time.Minute + if config.metricInterval > 0 { + interval = config.metricInterval + } + meterOpts = append(meterOpts, metric.WithReader(metric.NewPeriodicReader(&exporterLogSuppressor{exporter: exporter}, metric.WithInterval(interval))), + metric.WithView(metricViews...)) + provider := metric.NewMeterProvider(meterOpts...) + mo := opentelemetry.MetricsOptions{ + MeterProvider: provider, + Metrics: opentelemetry.DefaultMetrics().Add( + "grpc.lb.wrr.rr_fallback", + "grpc.lb.wrr.endpoint_weight_not_yet_usable", + "grpc.lb.wrr.endpoint_weight_stale", + "grpc.lb.wrr.endpoint_weights", + "grpc.lb.rls.cache_entries", + "grpc.lb.rls.cache_size", + "grpc.lb.rls.default_target_picks", + "grpc.lb.rls.target_picks", + "grpc.lb.rls.failed_picks"), + OptionalLabels: []string{"grpc.lb.locality"}, + } + opts := []option.ClientOption{ + option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})), + } + context := &metricsContext{ + clientOpts: opts, + provider: provider, + close: createShutdown(ctx, provider), + } + return context, nil +} + +func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) { + var project string + c, err := transport.Creds(ctx, s.clientOption...) + if err == nil { + project = c.ProjectID + } + // Enable client-side metrics for gRPC + metricsContext, err := newGRPCMetricContext(ctx, project, config) + if err != nil { + return nil, fmt.Errorf("gRPC Metrics: %w", err) + } + return metricsContext, nil +} + +func createShutdown(ctx context.Context, provider *metric.MeterProvider) func() { + return func() { + provider.Shutdown(ctx) + } +} + +// Silences permission errors after initial error is emitted to prevent +// chatty logs. +type exporterLogSuppressor struct { + exporter metric.Exporter + emittedFailure bool +} + +// Implements OTel SDK metric.Exporter interface to prevent noisy logs from +// lack of credentials after initial failure. 
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter +func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure { + if strings.Contains(err.Error(), "PermissionDenied") { + e.emittedFailure = true + return fmt.Errorf("gRPC metrics failed due permission issue: %w", err) + } + return err + } + return nil +} + +func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality { + return e.exporter.Temporality(k) +} + +func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation { + return e.exporter.Aggregation(k) +} + +func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error { + return e.exporter.ForceFlush(ctx) +} + +func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error { + return e.exporter.Shutdown(ctx) +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index f7811a5d1..2387fd33c 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) @@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // Options such as UserProjectForHMACKeys can be used to set the // userProject to be billed against for operations. +// Note: gRPC is not supported. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. // Only inactive HMAC keys can be deleted. // After deletion, a key cannot be used to authenticate requests. +// Note: gRPC is not supported. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro return hmKey, nil } -func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { - if pbmd == nil { - return nil - } - - return &HMACKey{ - AccessID: pbmd.GetAccessId(), - ID: pbmd.GetId(), - State: HMACState(pbmd.GetState()), - ProjectID: pbmd.GetProject(), - CreatedTime: convertProtoTime(pbmd.GetCreateTime()), - UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), - ServiceAccountEmail: pbmd.GetServiceAccountEmail(), - } -} - // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. +// Note: gRPC is not supported. func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") @@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct { } // Update mutates the HMACKey referred to by accessID. +// Note: gRPC is not supported. 
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) @@ -237,6 +224,7 @@ type HMACKeysIterator struct { // ListHMACKeys returns an iterator for listing HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. +// Note: gRPC is not supported. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { desc := new(hmacKeyDesc) for _, opt := range opts { diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e213a663..6baf90547 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -22,6 +22,7 @@ import ( "hash/crc32" "io" "io/ioutil" + "log" "net/http" "net/url" "os" @@ -47,13 +48,14 @@ import ( // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic // storageClient interface. type httpStorageClient struct { - creds *google.Credentials - hc *http.Client - xmlHost string - raw *raw.Service - scheme string - settings *settings - config *storageConfig + creds *google.Credentials + hc *http.Client + xmlHost string + raw *raw.Service + scheme string + settings *settings + config *storageConfig + dynamicReadReqStallTimeout *bucketDelayManager } // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON @@ -128,14 +130,29 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) } + var bd *bucketDelayManager + if config.readStallTimeoutConfig != nil { + drrstConfig := config.readStallTimeoutConfig + bd, err = newBucketDelayManager( + drrstConfig.TargetPercentile, + getDynamicReadReqIncreaseRateFromEnv(), + getDynamicReadReqInitialTimeoutSecFromEnv(drrstConfig.Min), + drrstConfig.Min, + defaultDynamicReqdReqMaxTimeout) + if err != nil { + return nil, fmt.Errorf("creating dynamic-delay: %w", err) + } + } + return &httpStorageClient{ - creds: creds, - hc: hc, - xmlHost: u.Host, - raw: rawService, - scheme: u.Scheme, - settings: s, - config: &config, + creds: creds, + hc: hc, + xmlHost: u.Host, + raw: rawService, + scheme: u.Scheme, + settings: s, + config: &config, + dynamicReadReqStallTimeout: bd, }, nil } @@ -857,15 +874,47 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa reopen := readerReopen(ctx, req.Header, params, s, func(ctx context.Context) (*http.Response, error) { - // Set custom headers passed in via the context. This is only required for XML; - // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. 
- ctxHeaders := callctx.HeadersFromContext(ctx) - for k, vals := range ctxHeaders { - for _, v := range vals { - req.Header.Set(k, v) + setHeadersFromCtx(ctx, req.Header) + + if c.dynamicReadReqStallTimeout == nil { + return c.hc.Do(req.WithContext(ctx)) + } + + cancelCtx, cancel := context.WithCancel(ctx) + var ( + res *http.Response + err error + ) + + done := make(chan bool) + go func() { + reqStartTime := time.Now() + res, err = c.hc.Do(req.WithContext(cancelCtx)) + if err == nil { + reqLatency := time.Since(reqStartTime) + c.dynamicReadReqStallTimeout.update(params.bucket, reqLatency) + } else if errors.Is(err, context.Canceled) { + // context.Canceled means operation took more than current dynamicTimeout, + // hence should be increased. + c.dynamicReadReqStallTimeout.increase(params.bucket) + } + done <- true + }() + + // Wait until timeout or request is successful. + timer := time.After(c.dynamicReadReqStallTimeout.getValue(params.bucket)) + select { + case <-timer: + log.Printf("stalled read-req cancelled after %fs", c.dynamicReadReqStallTimeout.getValue(params.bucket).Seconds()) + cancel() + err = context.DeadlineExceeded + if res != nil && res.Body != nil { + res.Body.Close() } + case <-done: + cancel = nil } - return c.hc.Do(req.WithContext(ctx)) + return res, err }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -1422,18 +1471,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } else { size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. - if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } + } + + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. + crc, checkCRC = parseCRC32c(res) + if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) { + checkCRC = false } remain := res.ContentLength @@ -1470,6 +1521,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen StartOffset: startOffset, Generation: params.gen, Metageneration: metaGen, + CRC32C: crc, + Decompressed: res.Uncompressed || uncompressedByServer(res), } return &Reader{ Attrs: attrs, @@ -1484,3 +1537,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen }, }, nil } + +// setHeadersFromCtx sets custom headers passed in via the context on the header, +// replacing any header with the same key (which avoids duplicating invocation headers). 
+// This is only required for XML; for gRPC & JSON requests this is handled in +// the GAPIC and Apiary layers respectively. +func setHeadersFromCtx(ctx context.Context, header http.Header) { + ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + // Merge x-goog-api-client values into a single space-separated value. + if strings.EqualFold(k, xGoogHeaderKey) { + alreadySetValues := header.Values(xGoogHeaderKey) + vals = append(vals, alreadySetValues...) + + if len(vals) > 0 { + xGoogHeader := vals[0] + for _, v := range vals[1:] { + xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") + } + header.Set(k, xGoogHeader) + } + } else { + for _, v := range vals { + header.Set(k, v) + } + } + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index 415b2b585..fc3e783bd 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -68,100 +68,6 @@ func (it *BucketIterator) takeBuf() interface{} { return b } -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. -type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. -type NotificationConfigIterator struct { - items []*storagepb.NotificationConfig - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. 
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { - var item *storagepb.NotificationConfig - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationConfigIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationConfigIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - // ObjectIterator manages a stream of *storagepb.Object. type ObjectIterator struct { items []*storagepb.Object diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go new file mode 100644 index 000000000..e6cb160d0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package storage + +import ( + "iter" + + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 5e2a8f0ad..869f3b1fb 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -17,19 +17,15 @@ // Package storage is an auto-generated package for the // Cloud Storage API. // -// Stop. This folder is likely not what you are looking for. This folder -// contains protocol buffer definitions for an API only accessible to select -// customers. Customers not participating should not depend on this file. -// Please contact Google Cloud sales if you are interested. Unless told -// otherwise by a Google Cloud representative, do not use or otherwise rely -// on any of the contents of this folder. 
If you would like to use Cloud -// Storage, please consult our official documentation (at +// This folder contains protocol buffer definitions for an API only +// accessible to select customers. Customers not participating should not +// depend on this file. Please contact Google Cloud sales if you are +// interested. Unless told otherwise by a Google Cloud representative, do not +// use or otherwise rely on any of the contents of this folder. If you would +// like to use Cloud Storage, please consult our official documentation (at // https://cloud.google.com/storage/docs/apis) for details on our XML and // JSON APIs, or else consider one of our client libraries (at -// https://cloud.google.com/storage/docs/reference/libraries). This API -// defined in this folder is unreleased and may shut off, break, or fail at -// any time for any users who are not registered as a part of a private -// preview program. +// https://cloud.google.com/storage/docs/reference/libraries). // // # General documentation // diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 56256bb2c..7b0241b4b 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -30,31 +30,11 @@ "CreateBucket" ] }, - "CreateHmacKey": { - "methods": [ - "CreateHmacKey" - ] - }, - "CreateNotificationConfig": { - "methods": [ - "CreateNotificationConfig" - ] - }, "DeleteBucket": { "methods": [ "DeleteBucket" ] }, - "DeleteHmacKey": { - "methods": [ - "DeleteHmacKey" - ] - }, - "DeleteNotificationConfig": { - "methods": [ - "DeleteNotificationConfig" - ] - }, "DeleteObject": { "methods": [ "DeleteObject" @@ -65,46 +45,21 @@ "GetBucket" ] }, - "GetHmacKey": { - "methods": [ - "GetHmacKey" - ] - }, "GetIamPolicy": { "methods": [ "GetIamPolicy" ] }, - "GetNotificationConfig": { - "methods": [ - "GetNotificationConfig" - ] - }, "GetObject": { "methods": [ "GetObject" ] }, - "GetServiceAccount": { - "methods": [ - "GetServiceAccount" - ] - }, "ListBuckets": { "methods": [ "ListBuckets" ] }, - "ListHmacKeys": { - "methods": [ - "ListHmacKeys" - ] - }, - "ListNotificationConfigs": { - "methods": [ - "ListNotificationConfigs" - ] - }, "ListObjects": { "methods": [ "ListObjects" @@ -155,11 +110,6 @@ "UpdateBucket" ] }, - "UpdateHmacKey": { - "methods": [ - "UpdateHmacKey" - ] - }, "UpdateObject": { "methods": [ "UpdateObject" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 82ec5db90..7890a6b39 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -50,10 +50,6 @@ type CallOptions struct { SetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption UpdateBucket []gax.CallOption - DeleteNotificationConfig []gax.CallOption - GetNotificationConfig []gax.CallOption - CreateNotificationConfig []gax.CallOption - ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption RestoreObject []gax.CallOption @@ -67,12 +63,6 @@ type CallOptions struct { RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption QueryWriteStatus []gax.CallOption - GetServiceAccount []gax.CallOption - CreateHmacKey []gax.CallOption - DeleteHmacKey []gax.CallOption - GetHmacKey []gax.CallOption - ListHmacKeys 
[]gax.CallOption - UpdateHmacKey []gax.CallOption } func defaultGRPCClientOptions() []option.ClientOption { @@ -84,6 +74,7 @@ func defaultGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://storage.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -208,58 +199,6 @@ func defaultCallOptions() *CallOptions { }) }), }, - DeleteNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListNotificationConfigs: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, ComposeObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -426,84 +365,6 @@ func defaultCallOptions() *CallOptions { }) }), }, - GetServiceAccount: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - DeleteHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListHmacKeys: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - 
codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - UpdateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, } } @@ -521,10 +382,6 @@ type internalClient interface { SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error - GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) @@ -538,12 +395,6 @@ type internalClient interface { RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) - GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) - CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) - DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error - GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) - ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator - UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) } // Client is a client for interacting with Cloud Storage API. @@ -641,11 +492,13 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques return c.internalClient.SetIamPolicy(ctx, req, opts...) } -// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if -// any, are held by the caller. +// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder +// to see which, if any, are held by the caller. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. 
+// projects/_/buckets/{bucket} for a bucket, +// projects/_/buckets/{bucket}/objects/{object} for an object, or +// projects/_/buckets/{bucket}/managedFolders/{managedFolder} +// for a managed folder. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } @@ -655,29 +508,6 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe return c.internalClient.UpdateBucket(ctx, req, opts...) } -// DeleteNotificationConfig permanently deletes a NotificationConfig. -func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) -} - -// GetNotificationConfig view a NotificationConfig. -func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.GetNotificationConfig(ctx, req, opts...) -} - -// CreateNotificationConfig creates a NotificationConfig for a given bucket. -// These NotificationConfigs, when triggered, publish messages to the -// specified Pub/Sub topics. See -// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.CreateNotificationConfig(ctx, req, opts...) -} - -// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. -func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - return c.internalClient.ListNotificationConfigs(ctx, req, opts...) -} - // ComposeObject concatenates a list of existing objects into a new object in the same // bucket. func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { @@ -848,36 +678,6 @@ func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWrite return c.internalClient.QueryWriteStatus(ctx, req, opts...) } -// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. -func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - return c.internalClient.GetServiceAccount(ctx, req, opts...) -} - -// CreateHmacKey creates a new HMAC key for the given service account. -func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - return c.internalClient.CreateHmacKey(ctx, req, opts...) -} - -// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. -func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteHmacKey(ctx, req, opts...) -} - -// GetHmacKey gets an existing HMAC key metadata for the given id. 
-func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.GetHmacKey(ctx, req, opts...) -} - -// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. -func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - return c.internalClient.ListHmacKeys(ctx, req, opts...) -} - -// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. -func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.UpdateHmacKey(ctx, req, opts...) -} - // gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. @@ -1198,6 +998,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } @@ -1246,138 +1049,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck return resp, nil } -func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) 
- return err -} - -func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) - it := &NotificationConfigIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { - resp := &storagepb.ListNotificationConfigsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1731,189 +1402,3 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW } return resp, nil } - -func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) - var resp *storagepb.ServiceAccount - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) - var resp *storagepb.CreateHmacKeyResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) 
- var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) - it := &HmacKeyMetadataIterator{} - req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) { - resp := &storagepb.ListHmacKeysResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetHmacKeys(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) 
- var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index aeb7512f4..349200cfc 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29, 0} } // Request message for DeleteBucket. @@ -743,296 +743,6 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } -// Request message for DeleteNotificationConfig. -type DeleteNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteNotificationConfigRequest) Reset() { - *x = DeleteNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNotificationConfigRequest) ProtoMessage() {} - -func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for GetNotificationConfig. -type GetNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. 
- // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationConfigRequest) Reset() { - *x = GetNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationConfigRequest) ProtoMessage() {} - -func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} -} - -func (x *GetNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for CreateNotificationConfig. -type CreateNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to which this NotificationConfig belongs. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the NotificationConfig to be inserted. - NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` -} - -func (x *CreateNotificationConfigRequest) Reset() { - *x = CreateNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNotificationConfigRequest) ProtoMessage() {} - -func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} -} - -func (x *CreateNotificationConfigRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { - if x != nil { - return x.NotificationConfig - } - return nil -} - -// Request message for ListNotifications. -type ListNotificationConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a Google Cloud Storage bucket. 
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of NotificationConfigs to return. The service may - // return fewer than this value. The default value is 100. Specifying a value - // above 100 will result in a page_size of 100. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotificationConfigs` call. - // Provide this to retrieve the subsequent page. - // - // When paginating, all other parameters provided to `ListNotificationConfigs` - // must match the call that provided the page token. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationConfigsRequest) Reset() { - *x = ListNotificationConfigsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsRequest) ProtoMessage() {} - -func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} -} - -func (x *ListNotificationConfigsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListNotificationConfigsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationConfigsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The result of a call to ListNotificationConfigs -type ListNotificationConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` - // A token, which can be sent as `page_token` to retrieve the next page. - // If this field is omitted, there are no subsequent pages. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationConfigsResponse) Reset() { - *x = ListNotificationConfigsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsResponse) ProtoMessage() {} - -func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} -} - -func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { - if x != nil { - return x.NotificationConfigs - } - return nil -} - -func (x *ListNotificationConfigsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - // Request message for ComposeObject. type ComposeObjectRequest struct { state protoimpl.MessageState @@ -1069,7 +779,7 @@ type ComposeObjectRequest struct { func (x *ComposeObjectRequest) Reset() { *x = ComposeObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] + mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1082,7 +792,7 @@ func (x *ComposeObjectRequest) String() string { func (*ComposeObjectRequest) ProtoMessage() {} func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] + mi := &file_google_storage_v2_storage_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1095,7 +805,7 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. 
func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} } func (x *ComposeObjectRequest) GetDestination() *Object { @@ -1192,7 +902,7 @@ type DeleteObjectRequest struct { func (x *DeleteObjectRequest) Reset() { *x = DeleteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] + mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1205,7 +915,7 @@ func (x *DeleteObjectRequest) String() string { func (*DeleteObjectRequest) ProtoMessage() {} func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] + mi := &file_google_storage_v2_storage_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1218,7 +928,7 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} } func (x *DeleteObjectRequest) GetBucket() string { @@ -1290,6 +1000,12 @@ type RestoreObjectRequest struct { Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` // Required. The specific revision of the object to restore. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets. This parameter is optional, and is only required in the rare case + // when there are multiple soft-deleted objects with the same name and + // generation. + RestoreToken string `protobuf:"bytes,11,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"` // Makes the operation conditional on whether the object's current generation // matches the given value. Setting to 0 makes the operation succeed only if // there are no live versions of the object. @@ -1316,7 +1032,7 @@ type RestoreObjectRequest struct { func (x *RestoreObjectRequest) Reset() { *x = RestoreObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1329,7 +1045,7 @@ func (x *RestoreObjectRequest) String() string { func (*RestoreObjectRequest) ProtoMessage() {} func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1342,7 +1058,7 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. 
func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} } func (x *RestoreObjectRequest) GetBucket() string { @@ -1366,6 +1082,13 @@ func (x *RestoreObjectRequest) GetGeneration() int64 { return 0 } +func (x *RestoreObjectRequest) GetRestoreToken() string { + if x != nil { + return x.RestoreToken + } + return "" +} + func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { if x != nil && x.IfGenerationMatch != nil { return *x.IfGenerationMatch @@ -1423,7 +1146,7 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1436,7 +1159,7 @@ func (x *CancelResumableWriteRequest) String() string { func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1449,7 +1172,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} } func (x *CancelResumableWriteRequest) GetUploadId() string { @@ -1470,7 +1193,7 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1483,7 +1206,7 @@ func (x *CancelResumableWriteResponse) String() string { func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1496,7 +1219,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} } // Request message for ReadObject. 
@@ -1558,7 +1281,7 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1571,7 +1294,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1584,7 +1307,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} } func (x *ReadObjectRequest) GetBucket() string { @@ -1701,12 +1424,18 @@ type GetObjectRequest struct { // metadata.owner. // * may be used to mean "all fields". ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets and if soft_deleted is set to true. This parameter is optional, and + // is only required in the rare case when there are multiple soft-deleted + // objects with the same name and generation. + RestoreToken string `protobuf:"bytes,12,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"` } func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1719,7 +1448,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1732,7 +1461,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} } func (x *GetObjectRequest) GetBucket() string { @@ -1805,6 +1534,13 @@ func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { return nil } +func (x *GetObjectRequest) GetRestoreToken() string { + if x != nil { + return x.RestoreToken + } + return "" +} + // Response message for ReadObject. 
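// Editor's note (illustrative sketch, not part of the generated code): the
// restore_token field added above to RestoreObjectRequest and GetObjectRequest
// is only needed in the rare case where several soft-deleted objects in a
// hierarchical-namespace bucket share the same name and generation. A
// hypothetical construction of both requests, assuming the generated package
// is imported as "storagepb" and "google.golang.org/protobuf/proto" supplies
// the optional-scalar helpers:
func buildRestoreRequests() (*storagepb.RestoreObjectRequest, *storagepb.GetObjectRequest) {
	restore := &storagepb.RestoreObjectRequest{
		Bucket:       "projects/_/buckets/example-bucket", // hypothetical bucket name
		Object:       "logs/app.log",                      // hypothetical object name
		Generation:   1712345678901234,
		RestoreToken: "example-restore-token", // disambiguates duplicate soft-deleted copies
		// Setting if_generation_match to 0 makes the restore succeed only if
		// no live version of the object currently exists.
		IfGenerationMatch: proto.Int64(0),
	}
	get := &storagepb.GetObjectRequest{
		Bucket:       "projects/_/buckets/example-bucket",
		Object:       "logs/app.log",
		Generation:   1712345678901234,
		RestoreToken: "example-restore-token", // used together with the request's soft_deleted flag
	}
	return restore, get
}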
type ReadObjectResponse struct { state protoimpl.MessageState @@ -1832,7 +1568,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1845,7 +1581,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1858,7 +1594,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1929,7 +1665,7 @@ type WriteObjectSpec struct { func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1942,7 +1678,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1955,7 +1691,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} } func (x *WriteObjectSpec) GetResource() *Object { @@ -2059,7 +1795,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2072,7 +1808,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2085,7 +1821,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2201,7 +1937,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2214,7 +1950,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2227,7 +1963,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2306,8 +2042,7 @@ type BidiWriteObjectRequest struct { Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service // don't match the specified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). + // provided in last request (with finish_write set). ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // For each BidiWriteObjectRequest where state_lookup is `true` or the client // closes the stream, the service will send a BidiWriteObjectResponse @@ -2339,7 +2074,7 @@ type BidiWriteObjectRequest struct { func (x *BidiWriteObjectRequest) Reset() { *x = BidiWriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2352,7 +2087,7 @@ func (x *BidiWriteObjectRequest) String() string { func (*BidiWriteObjectRequest) ProtoMessage() {} func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2365,7 +2100,7 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. 
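// Editor's note (illustrative sketch, not the client library's actual upload
// path): the comment change above tightens when object_checksums may appear on
// a BidiWriteObject stream; they may now only accompany the final message, the
// one that sets finish_write. Message and field names are taken from this
// file; the CRC32C digest uses hash/crc32 with the Castagnoli polynomial and
// proto.Uint32 from "google.golang.org/protobuf/proto":
func finalBidiWriteMessage(allData []byte) *storagepb.BidiWriteObjectRequest {
	crc := crc32.Checksum(allData, crc32.MakeTable(crc32.Castagnoli))
	return &storagepb.BidiWriteObjectRequest{
		FinishWrite: true, // closes the upload
		ObjectChecksums: &storagepb.ObjectChecksums{
			Crc32C: proto.Uint32(crc), // service validates the stored object against this
		},
	}
}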
func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { @@ -2495,7 +2230,7 @@ type BidiWriteObjectResponse struct { func (x *BidiWriteObjectResponse) Reset() { *x = BidiWriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2508,7 +2243,7 @@ func (x *BidiWriteObjectResponse) String() string { func (*BidiWriteObjectResponse) ProtoMessage() {} func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2521,7 +2256,7 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { @@ -2630,7 +2365,7 @@ type ListObjectsRequest struct { func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2643,7 +2378,7 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2656,7 +2391,7 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (x *ListObjectsRequest) GetParent() string { @@ -2766,7 +2501,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2779,7 +2514,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2792,7 +2527,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2827,7 +2562,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2840,7 +2575,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2853,7 +2588,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. 
func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -3011,7 +2746,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3024,7 +2759,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3037,7 +2772,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -3227,7 +2962,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3240,7 +2975,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3253,7 +2988,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -3312,7 +3047,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3325,7 +3060,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3338,7 +3073,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -3376,7 +3111,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3389,7 +3124,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3402,7 +3137,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. 
func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -3459,7 +3194,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3472,7 +3207,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3485,7 +3220,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -3544,87 +3279,40 @@ func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } -// Request message for GetServiceAccount. -type GetServiceAccountRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Project ID, in the format of "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetServiceAccountRequest) Reset() { - *x = GetServiceAccountRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceAccountRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceAccountRequest) ProtoMessage() {} - -func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. -func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} -} - -func (x *GetServiceAccountRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request message for CreateHmacKey. -type CreateHmacKeyRequest struct { +// Parameters that can be passed to any object request. +type CommonObjectRequestParams struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/{projectIdentifier}". 
{projectIdentifier} can be the - // project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Required. The service account to create the HMAC for. - ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // Encryption algorithm used with the Customer-Supplied Encryption Keys + // feature. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // Encryption key used with the Customer-Supplied Encryption Keys feature. + // In raw bytes format (not base64-encoded). + EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` + // SHA256 hash of encryption key used with the Customer-Supplied Encryption + // Keys feature. + EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` } -func (x *CreateHmacKeyRequest) Reset() { - *x = CreateHmacKeyRequest{} +func (x *CommonObjectRequestParams) Reset() { + *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateHmacKeyRequest) String() string { +func (x *CommonObjectRequestParams) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateHmacKeyRequest) ProtoMessage() {} +func (*CommonObjectRequestParams) ProtoMessage() {} -func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] +func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3635,115 +3323,56 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} -} - -func (x *CreateHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" +// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. +func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } -func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { +func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { if x != nil { - return x.ServiceAccountEmail + return x.EncryptionAlgorithm } return "" } -// Create hmac response. The only time the secret for an HMAC will be returned. -type CreateHmacKeyResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key metadata. - Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // HMAC key secret material. - // In raw bytes format (not base64-encoded). 
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` -} - -func (x *CreateHmacKeyResponse) Reset() { - *x = CreateHmacKeyResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyResponse) ProtoMessage() {} - -func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} -} - -func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { +func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { if x != nil { - return x.Metadata + return x.EncryptionKeyBytes } return nil } -func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { +func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { if x != nil { - return x.SecretKeyBytes + return x.EncryptionKeySha256Bytes } return nil } -// Request object to delete a given HMAC key. -type DeleteHmacKeyRequest struct { +// Shared constants. +type ServiceConstants struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project that owns the HMAC key, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } -func (x *DeleteHmacKeyRequest) Reset() { - *x = DeleteHmacKeyRequest{} +func (x *ServiceConstants) Reset() { + *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteHmacKeyRequest) String() string { +func (x *ServiceConstants) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteHmacKeyRequest) ProtoMessage() {} +func (*ServiceConstants) ProtoMessage() {} -func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] +func (x *ServiceConstants) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3754,424 +3383,29 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. 
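// Editor's note (illustrative sketch, not part of the generated code): the
// relocated CommonObjectRequestParams message above carries Customer-Supplied
// Encryption Key (CSEK) material: the algorithm name, the raw key bytes (not
// base64-encoded), and the SHA-256 of the key. A hypothetical way to populate
// it, assuming the generated package is imported as "storagepb" and using
// crypto/rand and crypto/sha256 from the standard library:
func newCSEKParams() (*storagepb.CommonObjectRequestParams, error) {
	key := make([]byte, 32) // hypothetical 256-bit customer-supplied key
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	sum := sha256.Sum256(key)
	return &storagepb.CommonObjectRequestParams{
		EncryptionAlgorithm:      "AES256", // algorithm string used by Cloud Storage CSEK
		EncryptionKeyBytes:       key,      // raw bytes, not base64-encoded
		EncryptionKeySha256Bytes: sum[:],
	}, nil
}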
-func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} -} - -func (x *DeleteHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *DeleteHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" +// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. +func (*ServiceConstants) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } -// Request object to get metadata on a given HMAC key. -type GetHmacKeyRequest struct { +// A bucket. +type Bucket struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project the HMAC key lies in, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetHmacKeyRequest) Reset() { - *x = GetHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetHmacKeyRequest) ProtoMessage() {} - -func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} -} - -func (x *GetHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *GetHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request to fetch a list of HMAC keys under a given project. -type ListHmacKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to list HMAC keys for, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // The maximum number of keys to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously returned token from ListHmacKeysResponse to get the next page. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, filters to only return HMAC keys for specified service account. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // If set, return deleted keys that have not yet been wiped out. 
- ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` -} - -func (x *ListHmacKeysRequest) Reset() { - *x = ListHmacKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysRequest) ProtoMessage() {} - -func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. -func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} -} - -func (x *ListHmacKeysRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListHmacKeysRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListHmacKeysRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { - if x != nil { - return x.ShowDeletedKeys - } - return false -} - -// Hmac key list response with next page information. -type ListHmacKeysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListHmacKeysResponse) Reset() { - *x = ListHmacKeysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysResponse) ProtoMessage() {} - -func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. -func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} -} - -func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { - if x != nil { - return x.HmacKeys - } - return nil -} - -func (x *ListHmacKeysResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request object to update an HMAC key state. 
-// HmacKeyMetadata.state is required and the only writable field in -// UpdateHmacKey operation. Specifying fields other than state will result in an -// error. -type UpdateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The HMAC key to update. - // If present, the hmac_key's `id` field will be used to identify the key. - // Otherwise, the hmac_key's access_id and project fields will be used to - // identify the key. - HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` - // Update mask for hmac_key. - // Not specifying any fields will mean only the `state` field is updated to - // the value specified in `hmac_key`. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateHmacKeyRequest) Reset() { - *x = UpdateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateHmacKeyRequest) ProtoMessage() {} - -func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} -} - -func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { - if x != nil { - return x.HmacKey - } - return nil -} - -func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Parameters that can be passed to any object request. -type CommonObjectRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Encryption algorithm used with the Customer-Supplied Encryption Keys - // feature. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // Encryption key used with the Customer-Supplied Encryption Keys feature. - // In raw bytes format (not base64-encoded). - EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` - // SHA256 hash of encryption key used with the Customer-Supplied Encryption - // Keys feature. 
- EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` -} - -func (x *CommonObjectRequestParams) Reset() { - *x = CommonObjectRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CommonObjectRequestParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CommonObjectRequestParams) ProtoMessage() {} - -func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. -func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} -} - -func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { - if x != nil { - return x.EncryptionKeyBytes - } - return nil -} - -func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.EncryptionKeySha256Bytes - } - return nil -} - -// Shared constants. -type ServiceConstants struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ServiceConstants) Reset() { - *x = ServiceConstants{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConstants) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConstants) ProtoMessage() {} - -func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. -func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} -} - -// A bucket. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of the bucket. - // Format: `projects/{project}/buckets/{bucket}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Output only. The user-chosen part of the bucket name. The `{bucket}` - // portion of the `name` field. For globally unique buckets, this is equal to - // the "bucket name" of other Cloud Storage APIs. Example: "pub". - BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // The etag of the bucket. - // If included in the metadata of an UpdateBucketRequest, the operation will - // only be performed if the etag matches that of the bucket. 
- Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The project which owns this bucket, in the format of + // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. For globally unique buckets, this is equal to + // the "bucket name" of other Cloud Storage APIs. Example: "pub". + BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. + Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The project which owns this bucket, in the format of // "projects/{projectIdentifier}". // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` @@ -4268,7 +3502,8 @@ type Bucket struct { // Reserved for future use. SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` // Configuration that, if present, specifies the data placement for a - // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. + // [https://cloud.google.com/storage/docs/locations#location-dr][configurable + // dual-region]. CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. @@ -4285,7 +3520,7 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4298,7 +3533,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4311,7 +3546,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
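// Editor's note (illustrative sketch, not part of the generated code): the
// Bucket message above identifies buckets by full resource name rather than a
// bare bucket ID. A hypothetical helper assembling the formats described in
// the field comments ("projects/{project}/buckets/{bucket}" for name,
// "projects/{projectIdentifier}" for project), assuming "fmt" is imported:
func bucketResourceNames(projectID, bucketID string) (name, project string) {
	// The {project} segment of a bucket name is typically "_", since bucket
	// IDs are globally unique.
	name = fmt.Sprintf("projects/_/buckets/%s", bucketID)
	project = fmt.Sprintf("projects/%s", projectID) // project ID or project number
	return name, project
}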
func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *Bucket) GetName() string { @@ -4574,7 +3809,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4587,7 +3822,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4600,7 +3835,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *BucketAccessControl) GetRole() string { @@ -4682,7 +3917,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4695,7 +3930,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4708,7 +3943,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *ChecksummedData) GetContent() []byte { @@ -4734,230 +3969,35 @@ type ObjectChecksums struct { // CRC32C digest of the object data. Computed by the Cloud Storage service for // all written objects. // If set in a WriteObjectRequest, service will validate that the stored - // object matches this checksum. - Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` - // 128 bit MD5 hash of the object data. - // For more information about using the MD5 hash, see - // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and - // ETags: Best Practices]. - // Not all objects will provide an MD5 hash. For example, composite objects - // provide only crc32c hashes. 
- // This value is equivalent to running `cat object.txt | openssl md5 -binary` - Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` -} - -func (x *ObjectChecksums) Reset() { - *x = ObjectChecksums{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectChecksums) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectChecksums) ProtoMessage() {} - -func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. -func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} -} - -func (x *ObjectChecksums) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -func (x *ObjectChecksums) GetMd5Hash() []byte { - if x != nil { - return x.Md5Hash - } - return nil -} - -// Hmac Key Metadata, which includes all information other than the secret. -type HmacKeyMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. Resource name ID of the key in the format - // {projectIdentifier}/{accessId}. - // {projectIdentifier} can be the project ID or project number. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Immutable. Globally unique id for keys. - AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. Email of the service account the key authenticates as. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // State of the key. One of ACTIVE, INACTIVE, or DELETED. - // Writable, can be updated by UpdateHmacKey operation. - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // Output only. The creation time of the HMAC key. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The etag of the HMAC key. 
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *HmacKeyMetadata) Reset() { - *x = HmacKeyMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HmacKeyMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HmacKeyMetadata) ProtoMessage() {} - -func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. -func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} -} - -func (x *HmacKeyMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *HmacKeyMetadata) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *HmacKeyMetadata) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *HmacKeyMetadata) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *HmacKeyMetadata) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetEtag() string { - if x != nil { - return x.Etag - } - return "" + // object matches this checksum. + Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` + // 128 bit MD5 hash of the object data. + // For more information about using the MD5 hash, see + // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and + // ETags: Best Practices]. + // Not all objects will provide an MD5 hash. For example, composite objects + // provide only crc32c hashes. This value is equivalent to running `cat + // object.txt | openssl md5 -binary` + Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` } -// A directive to publish Pub/Sub notifications upon changes to a bucket. -type NotificationConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The resource name of this NotificationConfig. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - // The `{project}` portion may be `_` for globally unique buckets. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The Pub/Sub topic to which this subscription publishes. Formatted - // as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the NotificationConfig. - // If included in the metadata of GetNotificationConfigRequest, the operation - // will only be performed if the etag matches that of the NotificationConfig. 
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If - // empty, sent notifications for all event types. - EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` - // A list of additional attributes to attach to each Pub/Sub - // message published for this NotificationConfig. - CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this NotificationConfig to object names that - // begin with this prefix. - ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` - // Required. The desired content of the Payload. - PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` -} - -func (x *NotificationConfig) Reset() { - *x = NotificationConfig{} +func (x *ObjectChecksums) Reset() { + *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *NotificationConfig) String() string { +func (x *ObjectChecksums) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NotificationConfig) ProtoMessage() {} +func (*ObjectChecksums) ProtoMessage() {} -func (x *NotificationConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] +func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4968,60 +4008,25 @@ func (x *NotificationConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. -func (*NotificationConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} -} - -func (x *NotificationConfig) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NotificationConfig) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *NotificationConfig) GetEtag() string { - if x != nil { - return x.Etag - } - return "" +// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. 
+func (*ObjectChecksums) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } -func (x *NotificationConfig) GetEventTypes() []string { - if x != nil { - return x.EventTypes +func (x *ObjectChecksums) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C } - return nil + return 0 } -func (x *NotificationConfig) GetCustomAttributes() map[string]string { +func (x *ObjectChecksums) GetMd5Hash() []byte { if x != nil { - return x.CustomAttributes + return x.Md5Hash } return nil } -func (x *NotificationConfig) GetObjectNamePrefix() string { - if x != nil { - return x.ObjectNamePrefix - } - return "" -} - -func (x *NotificationConfig) GetPayloadFormat() string { - if x != nil { - return x.PayloadFormat - } - return "" -} - // Describes the Customer-Supplied Encryption Key mechanism used to store an // Object's data at rest. type CustomerEncryption struct { @@ -5039,7 +4044,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5052,7 +4057,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5065,7 +4070,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -5106,6 +4111,10 @@ type Object struct { // Immutable. The content generation of this object. Used for object // versioning. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Output only. Restore token used to differentiate deleted objects with the + // same name and generation. This field is output only, and only set for + // deleted objects in HNS buckets. + RestoreToken *string `protobuf:"bytes,35,opt,name=restore_token,json=restoreToken,proto3,oneof" json:"restore_token,omitempty"` // Output only. The version of the metadata for this generation of this // object. Used for preconditions and for detecting changes in metadata. A // metageneration number is only meaningful in the context of a particular @@ -5148,7 +4157,10 @@ type Object struct { // Components are accumulated by compose operations. ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` // Output only. Hashes for the data part of this object. This field is used - // for output only and will be silently ignored if provided in requests. + // for output only and will be silently ignored if provided in requests. The + // checksums of the complete object regardless of data range. 
If the object is + // downloaded in full, the client should compute one of these checksums over + // the downloaded object and compare it against the value provided here. Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` // Output only. The modification time of the object metadata. // Set initially to object creation time and then updated whenever any @@ -5214,7 +4226,7 @@ type Object struct { func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5227,7 +4239,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5240,7 +4252,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *Object) GetName() string { @@ -5271,6 +4283,13 @@ func (x *Object) GetGeneration() int64 { return 0 } +func (x *Object) GetRestoreToken() string { + if x != nil && x.RestoreToken != nil { + return *x.RestoreToken + } + return "" +} + func (x *Object) GetMetageneration() int64 { if x != nil { return x.Metageneration @@ -5452,7 +4471,10 @@ type ObjectAccessControl struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The access permission for the entity. + // The access permission for the entity. One of the following values: + // * `READER` + // * `WRITER` + // * `OWNER` Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` // The ID of the access-control entry. Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` @@ -5496,7 +4518,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5509,7 +4531,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5522,7 +4544,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. 
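The ObjectChecksums guidance above asks a client that downloads an object in full to recompute one of these digests and compare it against the value the service returned. A self-contained sketch of that check using only the Go standard library; the expected values are taken as plain parameters rather than read from a storagepb.ObjectChecksums message, and the sample payload is made up:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"hash/crc32"
)

// castagnoli is the CRC32C polynomial table used for Cloud Storage checksums.
var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// verifyDownload recomputes the CRC32C digest of the downloaded bytes and,
// when the service supplied one, the MD5 hash (composite objects may omit it).
func verifyDownload(data []byte, wantCRC32C uint32, wantMD5 []byte) error {
	if got := crc32.Checksum(data, castagnoli); got != wantCRC32C {
		return fmt.Errorf("crc32c mismatch: got %d, want %d", got, wantCRC32C)
	}
	if len(wantMD5) > 0 {
		if sum := md5.Sum(data); !bytes.Equal(sum[:], wantMD5) {
			return fmt.Errorf("md5 mismatch")
		}
	}
	return nil
}

func main() {
	payload := []byte("hello world") // stands in for the fully downloaded object
	sum := md5.Sum(payload)          // same digest as `openssl md5 -binary`
	fmt.Println(verifyDownload(payload, crc32.Checksum(payload, castagnoli), sum[:]))
}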
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *ObjectAccessControl) GetRole() string { @@ -5607,7 +4629,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5620,7 +4642,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5633,7 +4655,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5672,7 +4694,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5685,7 +4707,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5698,7 +4720,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5715,57 +4737,6 @@ func (x *ProjectTeam) GetTeam() string { return "" } -// A service account, owned by Cloud Storage, which may be used when taking -// action on behalf of a given project, for example to publish Pub/Sub -// notifications or to retrieve security keys. -type ServiceAccount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ID of the notification. 
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` -} - -func (x *ServiceAccount) Reset() { - *x = ServiceAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceAccount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceAccount) ProtoMessage() {} - -func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. -func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} -} - -func (x *ServiceAccount) GetEmailAddress() string { - if x != nil { - return x.EmailAddress - } - return "" -} - // The owner of a specific resource. type Owner struct { state protoimpl.MessageState @@ -5781,7 +4752,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5794,7 +4765,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5807,7 +4778,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *Owner) GetEntity() string { @@ -5841,7 +4812,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5854,7 +4825,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5867,7 +4838,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. 
func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *ContentRange) GetStart() int64 { @@ -5909,7 +4880,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5922,7 +4893,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5935,7 +4906,7 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message // Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0} } func (x *ComposeObjectRequest_SourceObject) GetName() string { @@ -5974,7 +4945,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5987,7 +4958,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6000,7 +4971,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p // Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. 
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0} } func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { @@ -6023,7 +4994,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6036,7 +5007,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6049,7 +5020,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -6089,7 +5060,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6102,7 +5073,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6115,7 +5086,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -6160,7 +5131,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6173,7 +5144,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6186,7 +5157,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -6212,7 +5183,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6225,7 +5196,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6238,7 +5209,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -6270,7 +5241,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6283,7 +5254,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6296,7 +5267,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -6322,7 +5293,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6335,7 +5306,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6348,7 +5319,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. 
func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -6387,7 +5358,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6400,7 +5371,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6413,7 +5384,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 6} } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -6454,7 +5425,7 @@ type Bucket_SoftDeletePolicy struct { func (x *Bucket_SoftDeletePolicy) Reset() { *x = Bucket_SoftDeletePolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6467,7 +5438,7 @@ func (x *Bucket_SoftDeletePolicy) String() string { func (*Bucket_SoftDeletePolicy) ProtoMessage() {} func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6480,7 +5451,7 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. 
func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 7} } func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { @@ -6512,7 +5483,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6525,7 +5496,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6538,7 +5509,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 8} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -6572,7 +5543,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6585,7 +5556,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6598,7 +5569,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. 
func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 9} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -6630,7 +5601,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6643,7 +5614,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6656,7 +5627,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 10} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -6691,7 +5662,7 @@ type Bucket_Autoclass struct { func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6704,7 +5675,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6717,7 +5688,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. 
func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 11} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -6761,7 +5732,7 @@ type Bucket_HierarchicalNamespace struct { func (x *Bucket_HierarchicalNamespace) Reset() { *x = Bucket_HierarchicalNamespace{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6774,7 +5745,7 @@ func (x *Bucket_HierarchicalNamespace) String() string { func (*Bucket_HierarchicalNamespace) ProtoMessage() {} func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6787,7 +5758,7 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 12} } func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { @@ -6816,7 +5787,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6829,7 +5800,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6842,7 +5813,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6875,7 +5846,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6888,7 +5859,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6901,7 +5872,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6935,7 +5906,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6948,7 +5919,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6961,7 +5932,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -7033,7 +6004,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[76] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7046,7 +6017,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[76] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7059,7 +6030,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -7242,386 +6213,385 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, - 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, - 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 
0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, - 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, - 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 
0x74, 0x63, - 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, - 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, - 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, - 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, - 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, - 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, - 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 
0x76, + 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, + 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, + 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, - 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 
0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, - 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 
0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 
0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, + 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd3, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, + 0x06, 
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, + 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 
0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, + 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, + 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, + 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x8e, 0x06, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, + 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 
0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, + 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 
0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, + 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, + 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, + 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 
0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, + 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, + 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, + 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 
0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, + 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, @@ -7642,834 +6612,662 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, - 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, - 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, - 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, - 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, - 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 
0x70, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, - 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, - 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, - 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, - 0x61, 0x69, 
0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, - 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, - 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, - 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, - 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, - 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, - 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, + 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, + 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, + 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, + 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, + 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, - 0x64, 0x65, 
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, - 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, - 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, - 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, + 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, + 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, - 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, - 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 
0x72, 0x69, - 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, + 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, + 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, + 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, - 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, - 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, - 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 
0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, - 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 
0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, - 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 
0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, + 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, + 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 
0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, - 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, - 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, - 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, - 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, - 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, - 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, - 0x65, 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, - 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, - 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, - 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, - 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, - 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, - 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, - 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, - 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, - 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, - 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, - 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, - 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, - 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, - 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, - 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, - 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, - 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, - 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, - 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, + 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, + 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, + 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, + 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, + 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, + 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, + 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, + 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, + 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, - 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 
0x48, 0x10, 0x80, - 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, - 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, - 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, - 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, - 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, - 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, - 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, - 0x70, 
0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, - 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, - 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, - 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, - 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, - 0x74, 0x65, 0x12, 0x44, 0x0a, 
0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, - 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, - 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, - 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, + 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, + 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, + 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 
0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, + 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, + 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, + 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, + 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, + 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, + 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, + 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, - 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, - 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 
0x67, 0x0a, 0x17, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, + 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, + 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, - 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, - 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, - 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, - 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 
0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, - 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, - 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, - 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, - 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, - 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, - 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, - 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, - 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, - 0x63, 0x6c, 0x65, 0x2e, 0x52, 
0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, - 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, - 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, - 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, - 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, - 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, - 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, - 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, - 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, - 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 
0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, - 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, - 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, - 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, - 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, - 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, - 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, - 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, - 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, - 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x08, - 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, - 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, - 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, - 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, - 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, - 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, - 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 
0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, + 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, + 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, + 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, + 0x69, 0x6e, 0x67, 0x18, 0x15, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, + 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, + 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, + 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, + 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, + 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 
0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, + 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, + 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, + 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, + 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, - 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, - 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, - 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, - 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, - 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 
0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, - 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, - 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, - 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, - 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, - 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, - 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, - 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, + 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, + 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, + 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x97, 0x02, 0x0a, 0x13, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, + 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 
0x53, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf7, 0x0d, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, @@ -8479,479 +7277,390 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 
0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, - 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, - 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, + 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 
0x75, + 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, + 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, + 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, - 0x0a, 0x10, 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, - 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x02, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x03, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, + 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, + 0x13, 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, + 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, + 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x48, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 
0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x8c, 0x1c, 0x0a, 0x07, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, + 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 
0x12, + 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, + 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 
0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 
0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, - 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, - 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, - 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, - 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, - 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, - 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, - 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 
0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, - 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, - 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, - 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, - 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x12, - 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 
0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, - 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, - 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 
0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, - 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, - 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, + 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, + 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, + 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, + 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, - 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, - 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 
0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, + 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, - 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 
0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, - 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, - 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, - 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, - 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, - 
0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, - 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, - 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, - 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x8a, 
0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, - 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, - 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, - 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, - 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, - 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, - 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, - 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, - 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, - 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, + 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x1a, 0xa7, 0x02, + 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, + 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, + 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8967,7 +7676,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 62) var file_google_storage_v2_storage_proto_goTypes = []any{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8977,262 +7686,216 @@ var file_google_storage_v2_storage_proto_goTypes = []any{ (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest - (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest - (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest - (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest - (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse - (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest - (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest - 
(*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse - (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest - (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse - (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 44: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata - (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig - (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption - (*Object)(nil), // 51: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount - (*Owner)(nil), // 56: google.storage.v2.Owner - (*ContentRange)(nil), // 57: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 62: 
google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy - (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass - (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace - nil, // 73: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 79: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 82: google.protobuf.Duration - (*date.Date)(nil), // 83: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 87: google.protobuf.Empty - (*iampb.Policy)(nil), // 88: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse + (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest + (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest + (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 16: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 17: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 18: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 19: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 20: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest 
+ (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest + (*CommonObjectRequestParams)(nil), // 29: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 30: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 31: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 32: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 33: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 34: google.storage.v2.ObjectChecksums + (*CustomerEncryption)(nil), // 35: google.storage.v2.CustomerEncryption + (*Object)(nil), // 36: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 37: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 38: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 39: google.storage.v2.ProjectTeam + (*Owner)(nil), // 40: google.storage.v2.Owner + (*ContentRange)(nil), // 41: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 42: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 43: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 44: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 45: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 46: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 47: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 48: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 49: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 50: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 51: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 52: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 53: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 54: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 55: google.storage.v2.Bucket.Autoclass + (*Bucket_HierarchicalNamespace)(nil), // 56: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 57: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 58: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 59: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 60: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 61: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 62: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 63: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 64: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 65: google.protobuf.Duration + (*date.Date)(nil), // 66: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 67: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 68: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 69: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 70: google.protobuf.Empty + (*iampb.Policy)(nil), // 71: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 72: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 80, // 0: 
google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig - 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig - 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 31: 
google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object - 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> 
google.storage.v2.Bucket.IamConfig - 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace - 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy - 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp - 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp - 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration - 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp - 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp - 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> 
google.protobuf.Timestamp - 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest - 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest - 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> 
google.storage.v2.GetServiceAccountRequest - 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object - 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse - 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> 
google.protobuf.Empty - 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 135, // [135:167] is the sub-list for method output_type - 103, // [103:135] is the sub-list for method input_type - 103, // [103:103] is the sub-list for extension type_name - 103, // [103:103] is the sub-list for extension extendee - 0, // [0:103] is the sub-list for field type_name + 63, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 31, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 36, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 29, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 33, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 41, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 36, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 36, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 16, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 16, // 26: 
google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 27: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 28: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 29: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 30: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 63, // 31: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 32: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 33: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 36, // 34: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 29, // 35: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 36: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 37: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 16, // 38: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 29, // 39: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 40: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 41: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 63, // 42: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 29, // 43: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 32, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 37, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 48, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 64, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 45, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 64, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 57, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 53, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 52, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 49, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 40, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 46, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 44, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 50, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 47, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 54, // 59: 
google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 55, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 56, // 61: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 51, // 62: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 39, // 63: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 37, // 64: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 65: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 64, // 66: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 34, // 67: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 64, // 68: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 64, // 69: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 64, // 70: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 62, // 71: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 40, // 72: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 35, // 73: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 64, // 74: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 64, // 75: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 64, // 76: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 39, // 77: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 36, // 78: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 43, // 79: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 58, // 80: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 59, // 81: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 64, // 82: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 65, // 83: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 65, // 84: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 64, // 85: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 64, // 86: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 64, // 87: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 64, // 88: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 60, // 89: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 61, // 90: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 66, // 91: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 66, // 92: 
google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 66, // 93: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 94: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 95: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 96: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 97: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 98: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 67, // 99: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 68, // 100: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 69, // 101: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 102: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 9, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 10, // 105: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 11, // 106: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 14, // 107: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 13, // 108: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 28, // 109: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 17, // 110: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 19, // 111: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 21, // 112: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 24, // 113: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 26, // 114: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 22, // 115: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 70, // 116: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 31, // 117: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 31, // 118: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 119: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 31, // 120: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 71, // 121: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 71, // 122: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 72, // 123: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 31, // 124: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 36, // 125: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 70, // 126: 
google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 36, // 127: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 12, // 128: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 36, // 129: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 15, // 130: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 36, // 131: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 18, // 132: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 20, // 133: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 38, // 134: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 25, // 135: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 27, // 136: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 23, // 137: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 116, // [116:138] is the sub-list for method output_type + 94, // [94:116] is the sub-list for method input_type + 94, // [94:94] is the sub-list for extension type_name + 94, // [94:94] is the sub-list for extension extendee + 0, // [0:94] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -9326,66 +7989,6 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*DeleteNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*GetNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*CreateNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ListNotificationConfigsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ListNotificationConfigsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest); i { case 0: return &v.state @@ -9397,7 +8000,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteObjectRequest); i { case 0: return &v.state @@ -9409,7 +8012,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state @@ -9421,7 +8024,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state @@ -9433,7 +8036,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state @@ -9445,7 +8048,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectRequest); i { case 0: return &v.state @@ -9457,7 +8060,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetObjectRequest); i { case 0: return &v.state @@ -9469,7 +8072,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectResponse); i { case 0: return &v.state @@ -9481,7 +8084,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectSpec); i { case 0: return &v.state @@ -9493,7 +8096,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectRequest); i { case 0: return &v.state @@ -9505,7 +8108,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectResponse); i { case 0: return &v.state @@ -9517,7 +8120,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state @@ -9529,7 +8132,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v 
:= v.(*BidiWriteObjectResponse); i { case 0: return &v.state @@ -9541,7 +8144,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsRequest); i { case 0: return &v.state @@ -9553,7 +8156,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state @@ -9565,7 +8168,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state @@ -9577,7 +8180,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state @@ -9589,7 +8192,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*RewriteResponse); i { case 0: return &v.state @@ -9601,7 +8204,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state @@ -9613,7 +8216,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state @@ -9625,7 +8228,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state @@ -9637,103 +8240,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*GetServiceAccountRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*CreateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*CreateHmacKeyResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*DeleteHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*GetHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*ListHmacKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*ListHmacKeysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*UpdateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state @@ -9745,7 +8252,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*ServiceConstants); i { case 0: return &v.state @@ -9757,7 +8264,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state @@ -9769,7 +8276,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*BucketAccessControl); i { case 0: return &v.state @@ -9781,7 +8288,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*ChecksummedData); i { case 0: return &v.state @@ -9793,7 +8300,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*ObjectChecksums); i { case 0: return &v.state @@ -9805,31 +8312,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { - switch v := v.(*HmacKeyMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { - switch v := v.(*NotificationConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryption); i { case 0: return &v.state @@ -9841,7 +8324,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*Object); i { case 0: return &v.state @@ -9853,7 +8336,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*ObjectAccessControl); i { case 0: return &v.state @@ -9865,20 +8348,8 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { - switch v := v.(*ListObjectsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*ProjectTeam); i { + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -9889,8 +8360,8 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { - switch v := v.(*ServiceAccount); i { + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -9901,7 +8372,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*Owner); i { case 0: return &v.state @@ -9913,7 +8384,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*ContentRange); i { case 0: return &v.state @@ -9925,7 +8396,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state @@ -9937,7 +8408,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state @@ -9949,7 +8420,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Billing); i { case 0: return &v.state @@ -9961,7 +8432,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Cors); i { case 0: return &v.state @@ -9973,7 +8444,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Encryption); i { case 0: return &v.state @@ -9985,7 +8456,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state @@ -9997,7 +8468,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state @@ -10009,7 +8480,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Logging); i { case 0: return &v.state @@ -10021,7 +8492,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state @@ -10033,7 +8504,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*Bucket_SoftDeletePolicy); i { case 0: return &v.state @@ -10045,7 +8516,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Versioning); i { case 0: return &v.state @@ -10057,7 +8528,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Website); i { case 0: return &v.state @@ -10069,7 +8540,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := 
v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state @@ -10081,7 +8552,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Autoclass); i { case 0: return &v.state @@ -10093,7 +8564,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { switch v := v.(*Bucket_HierarchicalNamespace); i { case 0: return &v.state @@ -10105,7 +8576,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state @@ -10117,7 +8588,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state @@ -10129,7 +8600,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -10141,7 +8612,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -10158,51 +8629,51 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ + 
file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{ (*BidiWriteObjectRequest_UploadId)(nil), (*BidiWriteObjectRequest_WriteObjectSpec)(nil), (*BidiWriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{ (*BidiWriteObjectResponse_PersistedSize)(nil), (*BidiWriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[33].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[54].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[60].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 79, + NumMessages: 62, NumExtensions: 0, NumServices: 1, }, @@ -10247,25 +8718,16 @@ type StorageClient interface { // The `resource` field in the request should be // `projects/_/buckets/{bucket}`. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. 
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -10393,18 +8855,6 @@ type StorageClient interface { // object name, the sequence of returned `persisted_size` values will be // non-decreasing. QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) } type storageClient struct { @@ -10496,42 +8946,6 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { - out := new(ListNotificationConfigsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) @@ -10719,60 +9133,6 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat return out, nil } -func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { - out := new(ServiceAccount) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { - out := new(CreateHmacKeyResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { - out := new(ListHmacKeysResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // StorageServer is the server API for Storage service. 
type StorageServer interface { // Permanently deletes an empty bucket. @@ -10793,25 +9153,16 @@ type StorageServer interface { // The `resource` field in the request should be // `projects/_/buckets/{bucket}`. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) @@ -10939,18 +9290,6 @@ type StorageServer interface { // object name, the sequence of returned `persisted_size` values will be // non-decreasing. QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) } // UnimplementedStorageServer can be embedded to have forward compatible implementations. 
@@ -10984,18 +9323,6 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.Te func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") -} func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") } @@ -11035,24 +9362,6 @@ func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartRe func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") } -func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") -} -func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") -} -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") -} -func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") -} -func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") -} -func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") -} func RegisterStorageServer(s *grpc.Server, srv StorageServer) { s.RegisterService(&_Storage_serviceDesc, srv) @@ -11220,78 +9529,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(DeleteNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationConfigsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListNotificationConfigs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ComposeObjectRequest) if err := dec(in); err != nil { @@ -11545,114 +9782,6 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceAccountRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetServiceAccount(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetServiceAccount", - } - handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListHmacKeysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListHmacKeys(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListHmacKeys", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - var _Storage_serviceDesc = grpc.ServiceDesc{ ServiceName: 
"google.storage.v2.Storage", HandlerType: (*StorageServer)(nil), @@ -11693,22 +9822,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateBucket", Handler: _Storage_UpdateBucket_Handler, }, - { - MethodName: "DeleteNotificationConfig", - Handler: _Storage_DeleteNotificationConfig_Handler, - }, - { - MethodName: "GetNotificationConfig", - Handler: _Storage_GetNotificationConfig_Handler, - }, - { - MethodName: "CreateNotificationConfig", - Handler: _Storage_CreateNotificationConfig_Handler, - }, - { - MethodName: "ListNotificationConfigs", - Handler: _Storage_ListNotificationConfigs_Handler, - }, { MethodName: "ComposeObject", Handler: _Storage_ComposeObject_Handler, @@ -11749,30 +9862,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "QueryWriteStatus", Handler: _Storage_QueryWriteStatus_Handler, }, - { - MethodName: "GetServiceAccount", - Handler: _Storage_GetServiceAccount_Handler, - }, - { - MethodName: "CreateHmacKey", - Handler: _Storage_CreateHmacKey_Handler, - }, - { - MethodName: "DeleteHmacKey", - Handler: _Storage_DeleteHmacKey_Handler, - }, - { - MethodName: "GetHmacKey", - Handler: _Storage_GetHmacKey_Handler, - }, - { - MethodName: "ListHmacKeys", - Handler: _Storage_ListHmacKeys_Handler, - }, - { - MethodName: "UpdateHmacKey", - Handler: _Storage_UpdateHmacKey_Handler, - }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go new file mode 100644 index 000000000..7db3ff527 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/experimental.go @@ -0,0 +1,32 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// All options in this package are experimental. + +package internal + +var ( + // WithMetricInterval is a function which is implemented by storage package. + // It sets how often to emit metrics when using NewPeriodicReader and must be + // greater than 1 minute. + WithMetricInterval any // func (*time.Duration) option.ClientOption + + // WithMetricExporter is a function which is implemented by storage package. + // Set an alternate client-side metric Exporter to emit metrics through. + WithMetricExporter any // func (*metric.Exporter) option.ClientOption + + // WithReadStallTimeout is a function which is implemented by storage package. + // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption. + WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption +) diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index e5b2de091..fc6b11e22 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.43.0" +const Version = "1.46.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index de57b4bbb..99783f3df 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -74,7 +74,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err) } attempts++ - return !errorFunc(err), err + retryable := errorFunc(err) + // Explicitly check context cancellation so that we can distinguish between a + // DEADLINE_EXCEEDED error from the server and a user-set context deadline. + // Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's + // sent by the server) in both cases. + if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) { + retryable = false + } + return !retryable, err }) } @@ -84,21 +92,7 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - // TODO: remove this once the respective transport packages merge xGoogHeader. - // Also remove gl-go at that time, as it will be repeated. - hdrs := callctx.HeadersFromContext(ctx) - for _, v := range hdrs[xGoogHeaderKey] { - xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") - } - - if hdrs[xGoogHeaderKey] != nil { - // Replace the key instead of adding it, if there was anything to merge with. - hdrs[xGoogHeaderKey] = []string{xGoogHeader} - } else { - // TODO: keep this line when removing the above code. - ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) - } - + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) return ctx } @@ -138,14 +132,18 @@ func ShouldRetry(err error) bool { return true } } + case *net.DNSError: + if e.IsTemporary { + return true + } case interface{ Temporary() bool }: if e.Temporary() { return true } } - // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. + // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC. 
if st, ok := status.FromError(err); ok { - if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { + if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded { return true } } diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 1d6cfdf59..bc15900f0 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,7 +21,6 @@ import ( "regexp" "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) @@ -92,30 +91,6 @@ func toNotification(rn *raw.Notification) *Notification { return n } -func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { - n := &Notification{ - ID: pbn.GetName(), - EventTypes: pbn.GetEventTypes(), - ObjectNamePrefix: pbn.GetObjectNamePrefix(), - CustomAttributes: pbn.GetCustomAttributes(), - PayloadFormat: pbn.GetPayloadFormat(), - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) - return n -} - -func toProtoNotification(n *Notification) *storagepb.NotificationConfig { - return &storagepb.NotificationConfig{ - Name: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: n.PayloadFormat, - } -} - var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`) // parseNotificationTopic extracts the project and topic IDs from from the full @@ -144,6 +119,7 @@ func toRawNotification(n *Notification) *raw.Notification { // AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID // and PayloadFormat, and must not set its ID. The other fields are all optional. The // returned Notification's ID can be used to refer to it. +// Note: gRPC is not supported. func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") defer func() { trace.EndSpan(ctx, err) }() @@ -165,6 +141,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re // Notifications returns all the Notifications configured for this bucket, as a map // indexed by notification ID. +// Note: gRPC is not supported. func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") defer func() { trace.EndSpan(ctx, err) }() @@ -182,15 +159,8 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } -func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { - m := map[string]*Notification{} - for _, n := range ns { - m[n.Name] = toNotificationFromProto(n) - } - return m -} - // DeleteNotification deletes the notification with the given ID. +// Note: gRPC is not supported. 
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") defer func() { trace.EndSpan(ctx, err) }() diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index debdb0f52..3b0cf9e71 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -15,15 +15,71 @@ package storage import ( + "os" + "strconv" + "time" + + "cloud.google.com/go/storage/experimental" + storageinternal "cloud.google.com/go/storage/internal" + "go.opentelemetry.io/otel/sdk/metric" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" ) -// storageConfig contains the Storage client option configuration that can be +const ( + dynamicReadReqIncreaseRateEnv = "DYNAMIC_READ_REQ_INCREASE_RATE" + dynamicReadReqInitialTimeoutEnv = "DYNAMIC_READ_REQ_INITIAL_TIMEOUT" + defaultDynamicReadReqIncreaseRate = 15.0 + defaultDynamicReqdReqMaxTimeout = 1 * time.Hour + defaultDynamicReadReqMinTimeout = 500 * time.Millisecond + defaultTargetPercentile = 0.99 +) + +func init() { + // initialize experimental options + storageinternal.WithMetricExporter = withMetricExporter + storageinternal.WithMetricInterval = withMetricInterval + storageinternal.WithReadStallTimeout = withReadStallTimeout +} + +// getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable. +// It returns defaultDynamicReadReqIncreaseRate if env is not set or the set value is invalid. +func getDynamicReadReqIncreaseRateFromEnv() float64 { + increaseRate := os.Getenv(dynamicReadReqIncreaseRateEnv) + if increaseRate == "" { + return defaultDynamicReadReqIncreaseRate + } + + val, err := strconv.ParseFloat(increaseRate, 64) + if err != nil { + return defaultDynamicReadReqIncreaseRate + } + return val +} + +// getDynamicReadReqInitialTimeoutSecFromEnv returns the value set in the env variable. +// It returns the passed defaultVal if env is not set or the set value is invalid. +func getDynamicReadReqInitialTimeoutSecFromEnv(defaultVal time.Duration) time.Duration { + initialTimeout := os.Getenv(dynamicReadReqInitialTimeoutEnv) + if initialTimeout == "" { + return defaultVal + } + + val, err := time.ParseDuration(initialTimeout) + if err != nil { + return defaultVal + } + return val +} + // set through storageClientOptions. type storageConfig struct { - useJSONforReads bool - readAPIWasSet bool + useJSONforReads bool + readAPIWasSet bool + disableClientMetrics bool + metricExporter *metric.Exporter + metricInterval time.Duration + readStallTimeoutConfig *experimental.ReadStallTimeoutConfig } // newStorageConfig generates a new storageConfig with all the given @@ -78,3 +134,94 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { c.useJSONforReads = w.useJSON c.readAPIWasSet = true } + +type withDisabledClientMetrics struct { + internaloption.EmbeddableAdapter + disabledClientMetrics bool +} + +// WithDisabledClientMetrics is an option that may be passed to [NewClient]. +// gRPC metrics are enabled by default in the GCS client and will export the +// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to +// [Google Cloud Monitoring]. The option is used to disable metrics. +// Google Cloud Support can use this information to more quickly diagnose +// problems related to GCS and gRPC. 
+// Sending this data does not incur any billing charges, and requires minimal +// CPU (a single RPC every few minutes) or memory (a few KiB to batch the +// telemetry). +// +// The default is to enable client metrics. To opt-out of metrics collected use +// this option. +// +// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md +// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md +// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs +func WithDisabledClientMetrics() option.ClientOption { + return &withDisabledClientMetrics{disabledClientMetrics: true} +} + +func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) { + c.disableClientMetrics = w.disabledClientMetrics +} + +type withMeterOptions struct { + internaloption.EmbeddableAdapter + // set sampling interval + interval time.Duration +} + +func withMetricInterval(interval time.Duration) option.ClientOption { + return &withMeterOptions{interval: interval} +} + +func (w *withMeterOptions) ApplyStorageOpt(c *storageConfig) { + c.metricInterval = w.interval +} + +type withMetricExporterConfig struct { + internaloption.EmbeddableAdapter + // exporter override + metricExporter *metric.Exporter +} + +func withMetricExporter(ex *metric.Exporter) option.ClientOption { + return &withMetricExporterConfig{metricExporter: ex} +} + +func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) { + c.metricExporter = w.metricExporter +} + +// WithReadStallTimeout is an option that may be passed to [NewClient]. +// It enables the client to retry the stalled read request, happens as part of +// storage.Reader creation. As the name suggest, timeout is adjusted dynamically +// based on past observed read-req latencies. +// +// This is only supported for the read operation and that too for http(XML) client. +// Grpc read-operation will be supported soon. +func withReadStallTimeout(rstc *experimental.ReadStallTimeoutConfig) option.ClientOption { + // TODO (raj-prince): To keep separate dynamicDelay instance for different BucketHandle. + // Currently, dynamicTimeout is kept at the client and hence shared across all the + // BucketHandle, which is not the ideal state. As latency depends on location of VM + // and Bucket, and read latency of different buckets may lie in different range. + // Hence having a separate dynamicTimeout instance at BucketHandle level will + // be better + if rstc.Min == time.Duration(0) { + rstc.Min = defaultDynamicReadReqMinTimeout + } + if rstc.TargetPercentile == 0 { + rstc.TargetPercentile = defaultTargetPercentile + } + return &withReadStallTimeoutConfig{ + readStallTimeoutConfig: rstc, + } +} + +type withReadStallTimeoutConfig struct { + internaloption.EmbeddableAdapter + readStallTimeoutConfig *experimental.ReadStallTimeoutConfig +} + +func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) { + config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 6da2432f0..e1d966592 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -65,6 +65,19 @@ type ReaderObjectAttrs struct { // meaningful in the context of a particular generation of a // particular object. Metageneration int64 + + // CRC32C is the CRC32 checksum of the entire object's content using the + // Castagnoli93 polynomial, if available. 
+ CRC32C uint32 + + // Decompressed is true if the object is stored as a gzip file and was + // decompressed when read. + // Objects are automatically decompressed if the object's metadata property + // "Content-Encoding" is set to "gzip" or satisfies decompressive + // transcoding as per https://cloud.google.com/storage/docs/transcoding. + // + // To prevent decompression on reads, use [ObjectHandle.ReadCompressed]. + Decompressed bool } // NewReader creates a new Reader to read the contents of the @@ -91,7 +104,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies // decompressive transcoding per https://cloud.google.com/storage/docs/transcoding // that file will be served back whole, regardless of the requested range as -// Google Cloud Storage dictates. +// Google Cloud Storage dictates. If decompressive transcoding occurs, +// [Reader.Attrs.Decompressed] will be true. // // By default, reads are made using the Cloud Storage XML API. We recommend // using the JSON API instead, which can be done by setting [WithJSONReads] diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index b6316fa66..0c11eb82a 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -214,14 +214,11 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // NewGRPCClient creates a new Storage client using the gRPC transport and API. // Client methods which have not been implemented in gRPC will return an error. -// In particular, methods for Cloud Pub/Sub notifications are not supported. +// In particular, methods for Cloud Pub/Sub notifications, Service Account HMAC +// keys, and ServiceAccount are not supported. // Using a non-default universe domain is also not supported with the Storage // gRPC client. // -// The storage gRPC API is still in preview and not yet publicly available. -// If you would like to use the API, please first contact your GCP account rep to -// request access. The API may be subject to breaking changes. -// // Clients should be reused instead of created as needed. The methods of Client // are safe for concurrent use by multiple goroutines. // @@ -1695,7 +1692,6 @@ type Query struct { // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of // prefixes returned by the query. Only applicable if Delimiter is set to /. - // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. IncludeFoldersAsPrefixes bool // SoftDeleted indicates whether to list soft-deleted objects. @@ -2350,6 +2346,7 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec } // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. +// Note: gRPC is not supported. func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { o := makeStorageOpts(true, c.retry, "") return c.tc.GetServiceAccount(ctx, projectID, o...) 
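A minimal usage sketch (not part of the diff) of the new storage client options introduced above. storage.WithDisabledClientMetrics and the ReadStallTimeoutConfig fields (Min, TargetPercentile) appear in the vendored option.go; the public experimental.WithReadStallTimeout wrapper is assumed to be exported by cloud.google.com/go/storage/experimental in v1.46.0, since only its internal wiring is visible here. Treat this as illustrative only, not as code from the upstream library or this change set.

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"cloud.google.com/go/storage/experimental" // assumed public home of WithReadStallTimeout
)

func main() {
	ctx := context.Background()

	// HTTP/XML client with dynamic read-stall retries: read requests slower
	// than the adaptive timeout (seeded by Min, tracking TargetPercentile of
	// observed latencies) are cancelled and retried. Per the vendored doc
	// comment, this only applies to the HTTP(XML) read path today.
	httpClient, err := storage.NewClient(ctx,
		experimental.WithReadStallTimeout(&experimental.ReadStallTimeoutConfig{
			Min:              500 * time.Millisecond,
			TargetPercentile: 0.99,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer httpClient.Close()

	// gRPC client with the default client-side gRPC metrics export to
	// Google Cloud Monitoring turned off.
	grpcClient, err := storage.NewGRPCClient(ctx, storage.WithDisabledClientMetrics())
	if err != nil {
		log.Fatal(err)
	}
	defer grpcClient.Close()
}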
diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go deleted file mode 100644 index 5706fe2c7..000000000 --- a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go +++ /dev/null @@ -1,51 +0,0 @@ -package helper - -import ( - "fmt" - "github.com/aliyun/credentials-go/credentials" - "os" -) - -const ( - EnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" - EnvOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" - EnvOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" -) - -func HaveOidcCredentialRequiredEnv() bool { - return os.Getenv(EnvRoleArn) != "" && - os.Getenv(EnvOidcProviderArn) != "" && - os.Getenv(EnvOidcTokenFile) != "" -} - -func NewOidcCredential(sessionName string) (credential credentials.Credential, err error) { - return GetOidcCredential(sessionName) -} - -// Deprecated: Use NewOidcCredential instead -func GetOidcCredential(sessionName string) (credential credentials.Credential, err error) { - roleArn := os.Getenv(EnvRoleArn) - oidcArn := os.Getenv(EnvOidcProviderArn) - tokenFile := os.Getenv(EnvOidcTokenFile) - if roleArn == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvRoleArn) - } - if oidcArn == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvOidcProviderArn) - } - if tokenFile == "" { - return nil, fmt.Errorf("environment variable %q is missing", EnvOidcTokenFile) - } - if _, err := os.Stat(tokenFile); err != nil { - return nil, fmt.Errorf("unable to read file at %q: %s", tokenFile, err) - } - - config := new(credentials.Config). - SetType("oidc_role_arn"). - SetOIDCProviderArn(oidcArn). - SetOIDCTokenFilePath(tokenFile). - SetRoleArn(roleArn). 
- SetRoleSessionName(sessionName) - - return credentials.NewCredential(config) -} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/LICENSE similarity index 100% rename from vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE rename to vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/LICENSE diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go new file mode 100644 index 000000000..71719adc9 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/accesskey_provider.go @@ -0,0 +1,27 @@ +package provider + +import ( + "context" + "errors" +) + +type AccessKeyProvider struct { + cred *Credentials +} + +func NewAccessKeyProvider(accessKeyId, accessKeySecret string) *AccessKeyProvider { + return &AccessKeyProvider{ + cred: &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + }, + } +} + +func (a *AccessKeyProvider) Credentials(ctx context.Context) (*Credentials, error) { + if a.cred.AccessKeyId == "" || a.cred.AccessKeySecret == "" { + return nil, NewNotEnableError(errors.New("AccessKeyId or AccessKeySecret is empty")) + } + + return a.cred.DeepCopy(), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go new file mode 100644 index 000000000..92cfcbe4c --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/chain_provider.go @@ -0,0 +1,220 @@ +package provider + +import ( + "context" + "fmt" + "strings" + "sync" + "time" +) + +var defaultRuntimeSwitchCacheDuration = time.Minute * 15 + +type ChainProvider struct { + providers []CredentialsProvider + + currentProvider CredentialsProvider + Logger Logger + logPrefix string + + enableRuntimeSwitch bool + runtimeSwitchCacheDuration time.Duration + lastSelectProviderTime time.Time + + lock sync.RWMutex +} + +type ChainProviderOptions struct { + EnableRuntimeSwitch bool + RuntimeSwitchCacheDuration time.Duration + + logPrefix string +} + +func NewChainProvider(providers ...CredentialsProvider) *ChainProvider { + return NewChainProviderWithOptions(providers, ChainProviderOptions{}) +} + +func NewChainProviderWithOptions(providers []CredentialsProvider, opts ChainProviderOptions) *ChainProvider { + opts.applyDefaults() + + if len(providers) == 0 { + return NewDefaultChainProvider(DefaultChainProviderOptions{ + EnableRuntimeSwitch: opts.EnableRuntimeSwitch, + RuntimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + }) + } + return &ChainProvider{ + enableRuntimeSwitch: opts.EnableRuntimeSwitch, + runtimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + providers: providers, + logPrefix: opts.logPrefix, + } +} + +func (c *ChainProvider) Credentials(ctx context.Context) (*Credentials, error) { + return c.getCredentials(ctx) +} + +func (c *ChainProvider) SelectProvider(ctx context.Context) (CredentialsProvider, error) { + return c.selectProvider(ctx) +} + +func (c *ChainProvider) Stop(ctx context.Context) { + c.logger().Debug(fmt.Sprintf("%s start to stop...", c.logPrefix)) + + for _, p := 
range c.providers { + if s, ok := p.(Stopper); ok { + s.Stop(ctx) + } + } +} + +func (c *ChainProvider) getCredentials(ctx context.Context) (*Credentials, error) { + p := c.getCurrentProvider() + if p != nil { + return p.Credentials(ctx) + } + + p, err := c.selectProvider(ctx) + if err != nil { + return nil, err + } + c.setCurrentProvider(p) + + return p.Credentials(ctx) +} + +func (c *ChainProvider) selectProvider(ctx context.Context) (CredentialsProvider, error) { + var notEnableErrors []string + for _, p := range c.providers { + if _, err := p.Credentials(ctx); err != nil { + if IsNotEnableError(err) { + c.logger().Debug(fmt.Sprintf("%s provider %T is not enabled will try to next: %s", + c.logPrefix, p, err.Error())) + notEnableErrors = append(notEnableErrors, fmt.Sprintf("provider %T is not enabled: %s", p, err.Error())) + continue + } + } + return p, nil + } + + err := fmt.Errorf("no available credentials provider: [%s]", strings.Join(notEnableErrors, ", ")) + return nil, NewNoAvailableProviderError(err) +} + +func (c *ChainProvider) getCurrentProvider() CredentialsProvider { + c.lock.RLock() + defer c.lock.RUnlock() + + if !c.enableRuntimeSwitch { + return c.currentProvider + } + + p := c.currentProvider + if p == nil { + return nil + } + if c.lastSelectProviderTime.IsZero() { + return nil + } + if time.Since(c.lastSelectProviderTime) >= c.runtimeSwitchCacheDuration { + c.logger().Debug(fmt.Sprintf("%s trigger select provider again", c.logPrefix)) + return nil + } + + return p +} + +func (c *ChainProvider) setCurrentProvider(p CredentialsProvider) { + c.lock.Lock() + defer c.lock.Unlock() + + prePT := fmt.Sprintf("%T", c.currentProvider) + pT := fmt.Sprintf("%T", p) + if prePT != pT { + c.logger().Info(fmt.Sprintf("%s switch to using new provider: %s -> %s", c.logPrefix, prePT, pT)) + } + + c.lastSelectProviderTime = time.Now() + c.currentProvider = p +} + +func (c *ChainProvider) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLog +} + +type DefaultChainProviderOptions struct { + EnableRuntimeSwitch bool + RuntimeSwitchCacheDuration time.Duration + + STSEndpoint string + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger + + logPrefix string +} + +func NewDefaultChainProvider(opts DefaultChainProviderOptions) *ChainProvider { + opts.applyDefaults() + + p := NewChainProviderWithOptions( + []CredentialsProvider{ + NewEnvProvider(EnvProviderOptions{}), + NewOIDCProvider(OIDCProviderOptions{ + STSEndpoint: opts.STSEndpoint, + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + NewEncryptedFileProvider(EncryptedFileProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + NewECSMetadataProvider(ECSMetadataProviderOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + }), + }, + ChainProviderOptions{ + EnableRuntimeSwitch: opts.EnableRuntimeSwitch, + RuntimeSwitchCacheDuration: opts.RuntimeSwitchCacheDuration, + logPrefix: opts.logPrefix, + }, + ) + p.Logger = opts.Logger + return p +} + +// Deprecated: use NewDefaultChainProvider instead +func DefaultChainProvider() *ChainProvider { + return NewDefaultChainProvider(DefaultChainProviderOptions{}) +} + +// Deprecated: use NewDefaultChainProvider instead +func DefaultChainProviderWithLogger(l Logger) *ChainProvider { + return NewDefaultChainProvider(DefaultChainProviderOptions{ + Logger: l, + }) +} + +func (o 
*ChainProviderOptions) applyDefaults() { + if o.RuntimeSwitchCacheDuration <= 0 { + o.RuntimeSwitchCacheDuration = defaultRuntimeSwitchCacheDuration + } + if o.logPrefix == "" { + o.logPrefix = "[ChainProvider]" + } +} + +func (o *DefaultChainProviderOptions) applyDefaults() { + if o.logPrefix == "" { + o.logPrefix = "[DefaultChainProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go new file mode 100644 index 000000000..778458d7b --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/credentials.go @@ -0,0 +1,22 @@ +package provider + +import "time" + +type Credentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration time.Time +} + +func (c *Credentials) DeepCopy() *Credentials { + if c == nil { + return nil + } + return &Credentials{ + AccessKeyId: c.AccessKeyId, + AccessKeySecret: c.AccessKeySecret, + SecurityToken: c.SecurityToken, + Expiration: c.Expiration, + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go new file mode 100644 index 000000000..4c7712b81 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ecsmetadata_provider.go @@ -0,0 +1,198 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + defaultExpiryWindow = time.Minute * 30 + defaultECSMetadataServerEndpoint = "http://100.100.100.200" + defaultECSMetadataTokenTTLSeconds = 3600 + defaultClientTimeout = time.Second * 30 +) + +type ECSMetadataProvider struct { + u *Updater + + endpoint string + roleName string + metadataToken string + metadataTokenTTLSeconds int + metadataTokenExp time.Time + + client *commonHttpClient + Logger Logger +} + +type ECSMetadataProviderOptions struct { + Endpoint string + Timeout time.Duration + Transport http.RoundTripper + + RoleName string + MetadataTokenTTLSeconds int + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewECSMetadataProvider(opts ECSMetadataProviderOptions) *ECSMetadataProvider { + opts.applyDefaults() + + client := newCommonHttpClient(opts.Transport, opts.Timeout) + client.logger = opts.Logger + e := &ECSMetadataProvider{ + endpoint: opts.Endpoint, + roleName: opts.RoleName, + metadataTokenTTLSeconds: opts.MetadataTokenTTLSeconds, + client: client, + Logger: opts.Logger, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[ECSMetadataProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (e *ECSMetadataProvider) Credentials(ctx context.Context) (*Credentials, error) { + return e.u.Credentials(ctx) +} + +func (e *ECSMetadataProvider) Stop(ctx context.Context) { + e.u.Stop(ctx) +} + +type ecsMetadataStsResponse struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` + LastUpdated string `json:"LastUpdated"` + Code string `json:"Code"` +} + +func (e *ECSMetadataProvider) getCredentials(ctx context.Context) (*Credentials, error) { + roleName, err := 
e.getRoleName(ctx) + if err != nil { + if e, ok := err.(*httpError); ok && e.code == 404 { + return nil, NewNotEnableError(fmt.Errorf("get role name from ecs metadata failed: %w", err)) + } + } + path := fmt.Sprintf("/latest/meta-data/ram/security-credentials/%s", roleName) + data, err := e.getMedataDataWithToken(ctx, http.MethodGet, path) + if err != nil { + return nil, err + } + + var obj ecsMetadataStsResponse + if err := json.Unmarshal([]byte(data), &obj); err != nil { + return nil, fmt.Errorf("parse credentials failed: %w", err) + } + if obj.AccessKeyId == "" || obj.AccessKeySecret == "" || + obj.SecurityToken == "" || obj.Expiration == "" { + return nil, fmt.Errorf("parse credentials got unexpected data: %s", + strings.ReplaceAll(data, "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Expiration) + if err != nil { + return nil, fmt.Errorf("parse Expiration (%s) failed: %w", obj.Expiration, err) + } + return &Credentials{ + AccessKeyId: obj.AccessKeyId, + AccessKeySecret: obj.AccessKeySecret, + SecurityToken: obj.SecurityToken, + Expiration: exp, + }, nil +} + +func (e *ECSMetadataProvider) getRoleName(ctx context.Context) (string, error) { + if e.roleName != "" { + return e.roleName, nil + } + name, err := e.getMedataDataWithToken(ctx, http.MethodGet, "/latest/meta-data/ram/security-credentials/") + if err != nil { + return "", err + } + return strings.TrimSpace(name), nil +} + +func (e *ECSMetadataProvider) getMedataToken(ctx context.Context) (string, error) { + if !e.metadataTokenExp.Before(time.Now()) { + return e.metadataToken, nil + } + + e.logger().Debug("start to get metadata token") + h := http.Header{} + h.Set("X-aliyun-ecs-metadata-token-ttl-seconds", fmt.Sprintf("%d", e.metadataTokenTTLSeconds)) + body, err := e.getMedataData(ctx, http.MethodPut, "/latest/api/token", h) + if err != nil { + return "", fmt.Errorf("get metadata token failed: %w", err) + } + + e.metadataToken = strings.TrimSpace(body) + e.metadataTokenExp = time.Now().Add(time.Duration(float64(e.metadataTokenTTLSeconds)*0.8) * time.Second) + + return body, nil +} + +func (e *ECSMetadataProvider) getMedataDataWithToken(ctx context.Context, method, path string) (string, error) { + token, err := e.getMedataToken(ctx) + if err != nil { + if e, ok := err.(*httpError); !(ok && e.code == 404) { + return "", err + } + } + h := http.Header{} + if token != "" { + h.Set("X-aliyun-ecs-metadata-token", token) + } + return e.getMedataData(ctx, method, path, h) +} + +func (e *ECSMetadataProvider) getMedataData(ctx context.Context, method, path string, header http.Header) (string, error) { + url := fmt.Sprintf("%s%s", e.endpoint, path) + return e.client.send(ctx, method, url, header, nil) +} + +func (e *ECSMetadataProvider) logger() Logger { + if e.Logger != nil { + return e.Logger + } + return defaultLog +} + +func (o *ECSMetadataProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.Endpoint == "" { + o.Endpoint = defaultECSMetadataServerEndpoint + } else { + o.Endpoint = strings.TrimRight(o.Endpoint, "/") + } + if o.MetadataTokenTTLSeconds == 0 { + o.MetadataTokenTTLSeconds = defaultECSMetadataTokenTTLSeconds + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go 
b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go new file mode 100644 index 000000000..7626599ea --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/encryptedfile_provider.go @@ -0,0 +1,135 @@ +package provider + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +const defaultEncryptedFilePath = "/var/addon/token-config" + +type EncryptedFileProvider struct { + f *FileProvider +} + +type EncryptedFileProviderOptions struct { + FilePath string + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger +} + +func NewEncryptedFileProvider(opts EncryptedFileProviderOptions) *EncryptedFileProvider { + opts.applyDefaults() + + e := &EncryptedFileProvider{} + e.f = NewFileProvider(opts.FilePath, parseEncryptedToken, FileProviderOptions{ + RefreshPeriod: opts.RefreshPeriod, + ExpiryWindow: opts.ExpiryWindow, + Logger: opts.Logger, + LogPrefix: "[EncryptedFileProvider]", + }) + + return e +} + +func (e *EncryptedFileProvider) Credentials(ctx context.Context) (*Credentials, error) { + return e.f.Credentials(ctx) +} + +func (o *EncryptedFileProviderOptions) applyDefaults() { + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.FilePath == "" { + o.FilePath = defaultEncryptedFilePath + } + if o.Logger == nil { + o.Logger = defaultLog + } +} + +func parseEncryptedToken(data []byte) (*Credentials, error) { + var t encryptedToken + if err := json.Unmarshal(data, &t); err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + if t.Error != nil { + return nil, t.Error + } + + id, err := decrypt(t.AccessKeyId, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + se, err := decrypt(t.AccessKeySecret, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + st, err := decrypt(t.SecurityToken, []byte(t.Keyring)) + if err != nil { + return nil, fmt.Errorf("parse data failed: %w", err) + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.Expiration) + if err != nil { + return nil, fmt.Errorf("parse expiration %s failed: %w", t.Expiration, err) + } + + return &Credentials{ + AccessKeyId: string(id), + AccessKeySecret: string(se), + SecurityToken: string(st), + Expiration: exp, + }, nil +} + +type encryptedToken struct { + AccessKeyId string `json:"access.key.id"` + AccessKeySecret string `json:"access.key.secret"` + SecurityToken string `json:"security.token"` + Expiration string `json:"expiration"` + Keyring string `json:"keyring"` + + Error *encryptedTokenError `json:"error,omitempty"` +} + +type encryptedTokenError struct { + RoleName string `json:"roleName,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e encryptedTokenError) Error() string { + return fmt.Sprintf("assume role %s failed: %s %s", e.RoleName, e.Code, e.Message) +} + +func decrypt(s string, keyring []byte) ([]byte, error) { + cdata, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return nil, err + } + block, err := aes.NewCipher(keyring) + if err != nil { + return nil, err + } + + blockSize := block.BlockSize() + iv := cdata[:blockSize] + blockMode := cipher.NewCBCDecrypter(block, iv) + origData := make([]byte, len(cdata)-blockSize) + + blockMode.CryptBlocks(origData, cdata[blockSize:]) + + origData = pkcs5UnPadding(origData) + return origData, nil +} + +func 
pkcs5UnPadding(origData []byte) []byte { + length := len(origData) + unpadding := int(origData[length-1]) + return origData[:(length - unpadding)] +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go new file mode 100644 index 000000000..7069b887f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/endpoint.go @@ -0,0 +1,43 @@ +package provider + +import ( + "os" + "strings" +) + +const ( + regionPlaceholder = "{region}" + defaultSTSEndpointTPL = "sts.{region}.aliyuncs.com" + defaultSTSVPCEndpointTPL = "sts-vpc.{region}.aliyuncs.com" +) + +// https://help.aliyun.com/zh/ram/developer-reference/api-sts-2015-04-01-endpoint +var stsEndpointsByRegion = map[string][2]string{ + "": { + defaultSTSEndpoint, + defaultSTSEndpoint, + }, + "__default__": { + defaultSTSEndpointTPL, + defaultSTSVPCEndpointTPL, + }, + "cn-hangzhou-finance": { + "sts.cn-hangzhou.aliyuncs.com", + "sts-vpc.cn-hangzhou.aliyuncs.com", + }, +} + +func GetSTSEndpoint(region string, vpcNetwork bool) string { + if v := os.Getenv(envStsEndpoint); v != "" { + return v + } + endpoints, exist := stsEndpointsByRegion[region] + if !exist { + endpoints = stsEndpointsByRegion["__default__"] + } + endpoint := endpoints[0] + if vpcNetwork { + endpoint = endpoints[1] + } + return strings.ReplaceAll(endpoint, regionPlaceholder, region) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go new file mode 100644 index 000000000..7210140dd --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env.go @@ -0,0 +1,155 @@ +package provider + +import "os" + +const ( + envRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + envOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + envOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" + + envStsEndpoint = "ALIBABA_CLOUD_STS_ENDPOINT" + envStsHttpScheme = "ALIBABA_CLOUD_STS_HTTP_SCHEME" +) + +// https://github.com/aliyun/credentials-go +const ( + envNewSdkAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_ID" + envNewSdkAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envNewSdkSecurityToken = "ALIBABA_CLOUD_SECURITY_TOKEN" // #nosec G101 + envNewSdkRoleSessionName = "ALIBABA_CLOUD_ROLE_SESSION_NAME" + + envNewSdkCredentialsURI = "ALIBABA_CLOUD_CREDENTIALS_URI" // #nosec G101 + + envNewSdkCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" // #nosec G101 + + envRoleSessionName = envNewSdkRoleSessionName + envCredentialsURI = envNewSdkCredentialsURI // #nosec G101 +) + +// https://github.com/aliyun/alibaba-cloud-sdk-go/tree/master/sdk/auth +const ( + envOldSdkAccessKeyID = "ALICLOUD_ACCESS_KEY" + envOldSdkAccessKeySecret = "ALICLOUD_SECRET_KEY" + envOldSdkAccessKeyStsToken = "ALICLOUD_ACCESS_KEY_STS_TOKEN" // #nosec G101 + //envOldSdkRoleArn = "ALICLOUD_ROLE_ARN" + envOldSdkRoleSessionName = "ALICLOUD_ROLE_SESSION_NAME" + //envOldSdkRoleSessionExpiration = "ALICLOUD_ROLE_SESSION_EXPIRATION" + //envOldSdkPrivateKey = "ALICLOUD_PRIVATE_KEY" + //envOldSdkPublicKeyID = "ALICLOUD_PUBLIC_KEY_ID" + //envOldSdkSessionExpiration = "ALICLOUD_SESSION_EXPIRATION" + //envOldSdkRoleName = "ALICLOUD_ROLE_NAME" +) + +const ( + envAliyuncliAccessKeyId1 = "ALIBABACLOUD_ACCESS_KEY_ID" + envAliyuncliAccessKeyId2 = "ALICLOUD_ACCESS_KEY_ID" + envAliyuncliAccessKeyId3 = 
"ACCESS_KEY_ID" + + envAliyuncliAccessKeySecret1 = "ALIBABACLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAliyuncliAccessKeySecret2 = "ALICLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAliyuncliAccessKeySecret3 = "ACCESS_KEY_SECRET" // #nosec G101 + + envAliyuncliStsToken1 = "ALIBABACLOUD_SECURITY_TOKEN" // #nosec G101 + envAliyuncliStsToken2 = "ALICLOUD_SECURITY_TOKEN" // #nosec G101 + envAliyuncliStsToken3 = "SECURITY_TOKEN" // #nosec G101 + + envAliyuncliProfileName1 = "ALIBABACLOUD_PROFILE" + envAliyuncliProfileName2 = "ALIBABA_CLOUD_PROFILE" + envAliyuncliProfileName3 = "ALICLOUD_PROFILE" + + envAliyuncliIgnoreProfile = "ALIBABACLOUD_IGNORE_PROFILE" + + envAliyuncliProfilePath = "ALIBABACLOUD_PROFILE_PATH" +) + +// https://github.com/aliyun/alibabacloud-credentials-cli +const ( + envAccAlibabaCloudAccessKeyId = "ALIBABACLOUD_ACCESS_KEY_ID" + envAccAlibabaCloudAccessKeySecret = "ALIBABACLOUD_ACCESS_KEY_SECRET" // #nosec G101 + envAccAlibabaCloudSecurityToken = "ALIBABACLOUD_SECURITY_TOKEN" // #nosec G101 +) + +var ( + accessKeyIdEnvs = []string{ + envNewSdkAccessKeyId, + envOldSdkAccessKeyID, + envAliyuncliAccessKeyId1, + envAliyuncliAccessKeyId2, + envAccAlibabaCloudAccessKeyId, + //envAliyuncliAccessKeyId3, + } + + accessKeySecretEnvs = []string{ + envNewSdkAccessKeySecret, + envOldSdkAccessKeySecret, + envAliyuncliAccessKeySecret1, + envAliyuncliAccessKeySecret2, + envAccAlibabaCloudAccessKeySecret, + //envAliyuncliAccessKeySecret3, + } + + securityTokenEnvs = []string{ + envNewSdkSecurityToken, + envOldSdkAccessKeyStsToken, + envAliyuncliStsToken1, + envAliyuncliStsToken2, + envAccAlibabaCloudSecurityToken, + //envAliyuncliStsToken3, + } + + roleArnEnvs = []string{ + envRoleArn, + } + oidcProviderArnEnvs = []string{ + envOidcProviderArn, + } + oidcTokenFileEnvs = []string{ + envOidcTokenFile, + } + roleSessionNameEnvs = []string{ + envNewSdkRoleSessionName, + envOldSdkRoleSessionName, + } + + credentialsURIEnvs = []string{ + envNewSdkCredentialsURI, + } + + credentialFileEnvs = []string{ + envNewSdkCredentialFile, + } + + aliyuncliProfileNameEnvs = []string{ + envAliyuncliProfileName1, + envAliyuncliProfileName2, + envAliyuncliProfileName3, + } + aliyuncliIgnoreProfileEnvs = []string{ + envAliyuncliIgnoreProfile, + } + aliyuncliProfilePathEnvs = []string{ + envAliyuncliProfilePath, + } +) + +func getEnvsValue(keys []string) string { + for _, key := range keys { + v := os.Getenv(key) + if v != "" { + return v + } + } + return "" +} + +func getRoleSessionNameFromEnv() string { + return getEnvsValue(roleSessionNameEnvs) +} + +func getStsEndpointFromEnv() string { + return getEnvsValue([]string{envStsEndpoint}) +} + +func getStsHttpSchemeFromEnv() string { + return getEnvsValue([]string{envStsHttpScheme}) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go new file mode 100644 index 000000000..33783eda2 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/env_provider.go @@ -0,0 +1,128 @@ +package provider + +import ( + "context" + "errors" + "fmt" + "os" +) + +const ( + envAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_ID" + envAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" + envSecurityToken = "ALIBABA_CLOUD_SECURITY_TOKEN" +) + +type EnvProvider struct { + cp CredentialsProvider +} + +type EnvProviderOptions struct { + EnvAccessKeyId string + EnvAccessKeySecret string + EnvSecurityToken 
string + + EnvRoleArn string + EnvOIDCProviderArn string + EnvOIDCTokenFile string + + EnvCredentialsURI string + + stsEndpoint string +} + +func NewEnvProvider(opts EnvProviderOptions) *EnvProvider { + opts.applyDefaults() + + e := &EnvProvider{} + e.cp = e.getProvider(opts) + + return e +} + +func (e *EnvProvider) Credentials(ctx context.Context) (*Credentials, error) { + cred, err := e.cp.Credentials(ctx) + + if err != nil { + if IsNoAvailableProviderError(err) { + return nil, NewNotEnableError(fmt.Errorf("not found credentials from env: %w", err)) + } + return nil, err + } + + return cred.DeepCopy(), nil +} + +func (e *EnvProvider) Stop(ctx context.Context) { + if s, ok := e.cp.(Stopper); ok { + s.Stop(ctx) + } +} + +func (e *EnvProvider) getProvider(opts EnvProviderOptions) CredentialsProvider { + accessKeyId := os.Getenv(opts.EnvAccessKeyId) + accessKeySecret := os.Getenv(opts.EnvAccessKeySecret) + securityToken := os.Getenv(opts.EnvSecurityToken) + roleArn := os.Getenv(opts.EnvRoleArn) + oidcProviderArn := os.Getenv(opts.EnvOIDCProviderArn) + oidcTokenFile := os.Getenv(opts.EnvOIDCTokenFile) + credentialsURI := os.Getenv(opts.EnvCredentialsURI) + + switch { + case accessKeyId != "" && accessKeySecret != "" && securityToken != "": + return NewSTSTokenProvider( + os.Getenv(opts.EnvAccessKeyId), + os.Getenv(opts.EnvAccessKeySecret), + os.Getenv(opts.EnvSecurityToken), + ) + + case roleArn != "" && oidcProviderArn != "" && oidcTokenFile != "": + return NewOIDCProvider(OIDCProviderOptions{ + RoleArn: os.Getenv(opts.EnvRoleArn), + OIDCProviderArn: os.Getenv(opts.EnvOIDCProviderArn), + OIDCTokenFile: os.Getenv(opts.EnvOIDCTokenFile), + STSEndpoint: opts.stsEndpoint, + }) + + case credentialsURI != "": + return NewURIProvider(credentialsURI, URIProviderOptions{}) + + case accessKeyId != "" && accessKeySecret != "": + return NewAccessKeyProvider( + os.Getenv(opts.EnvAccessKeyId), + os.Getenv(opts.EnvAccessKeySecret), + ) + + default: + return &errorProvider{ + err: NewNoAvailableProviderError( + errors.New("no validated credentials were found in environment variables")), + } + } +} + +func (o *EnvProviderOptions) applyDefaults() { + if o.EnvAccessKeyId == "" { + o.EnvAccessKeyId = envAccessKeyId + } + if o.EnvAccessKeySecret == "" { + o.EnvAccessKeySecret = envAccessKeySecret + } + if o.EnvSecurityToken == "" { + o.EnvSecurityToken = envSecurityToken + } + + if o.EnvRoleArn == "" { + o.EnvRoleArn = defaultEnvRoleArn + } + if o.EnvOIDCProviderArn == "" { + o.EnvOIDCProviderArn = defaultEnvOIDCProviderArn + } + if o.EnvOIDCTokenFile == "" { + o.EnvOIDCTokenFile = defaultEnvOIDCTokenFile + } + + if o.EnvCredentialsURI == "" { + o.EnvCredentialsURI = envCredentialsURI + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go new file mode 100644 index 000000000..a53622892 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/error.go @@ -0,0 +1,55 @@ +package provider + +import "context" + +type NotEnableError struct { + err error +} + +type NoAvailableProviderError struct { + err error +} + +func NewNotEnableError(err error) *NotEnableError { + return &NotEnableError{err: err} +} + +func NewNoAvailableProviderError(err error) *NoAvailableProviderError { + return &NoAvailableProviderError{err: err} +} + +func (e NotEnableError) Error() string { + return e.err.Error() +} + +func (e NoAvailableProviderError) Error() 
string { + return e.err.Error() +} + +func IsNotEnableError(err error) bool { + if _, ok := err.(*NotEnableError); ok { + return true + } + if _, ok := err.(NotEnableError); ok { + return true + } + return false +} + +func IsNoAvailableProviderError(err error) bool { + if _, ok := err.(*NoAvailableProviderError); ok { + return true + } + if _, ok := err.(NoAvailableProviderError); ok { + return true + } + return false +} + +type errorProvider struct { + err error +} + +func (e errorProvider) Credentials(ctx context.Context) (*Credentials, error) { + return nil, e.err +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go new file mode 100644 index 000000000..5e8eebdec --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/file_provider.go @@ -0,0 +1,76 @@ +package provider + +import ( + "context" + "fmt" + "os" + "time" +) + +type FileProvider struct { + u *Updater + + filepath string + decoder func(data []byte) (*Credentials, error) +} + +type FileProviderOptions struct { + RefreshPeriod time.Duration + ExpiryWindow time.Duration + Logger Logger + LogPrefix string +} + +func NewFileProvider(filepath string, decoder func(data []byte) (*Credentials, error), opts FileProviderOptions) *FileProvider { + opts.applyDefaults() + + e := &FileProvider{ + filepath: filepath, + decoder: decoder, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: opts.LogPrefix, + }) + e.u.Start(context.TODO()) + + return e +} + +func (f *FileProvider) Credentials(ctx context.Context) (*Credentials, error) { + return f.u.Credentials(ctx) +} + +func (f *FileProvider) Stop(ctx context.Context) { + f.u.Stop(ctx) +} + +func (f *FileProvider) getCredentials(ctx context.Context) (*Credentials, error) { + data, err := os.ReadFile(f.filepath) + if err != nil { + if os.IsNotExist(err) { + return nil, NewNotEnableError(fmt.Errorf("read file %s failed: %w", f.filepath, err)) + } + return nil, fmt.Errorf("read file %s failed: %w", f.filepath, err) + } + + cred, err := f.decoder(data) + if err != nil { + return nil, fmt.Errorf("decode data from %s failed: %w", f.filepath, err) + } + return cred, nil +} + +func (f *FileProviderOptions) applyDefaults() { + if f.ExpiryWindow == 0 { + f.ExpiryWindow = defaultExpiryWindow + } + if f.Logger == nil { + f.Logger = defaultLog + } + if f.LogPrefix == "" { + f.LogPrefix = "[FileProvider]" + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go new file mode 100644 index 000000000..08d145e44 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/function_provider.go @@ -0,0 +1,28 @@ +package provider + +import ( + "context" + "errors" +) + +type FunctionProvider struct { + getCredentials func(ctx context.Context) (*Credentials, error) +} + +func NewFunctionProvider(getCredentials func(ctx context.Context) (*Credentials, error)) *FunctionProvider { + return &FunctionProvider{ + getCredentials: getCredentials, + } +} + +func (f *FunctionProvider) Credentials(ctx context.Context) (*Credentials, error) { + if f.getCredentials == nil { + return nil, 
NewNotEnableError(errors.New("getCredentials function is nil")) + } + + cred, err := f.getCredentials(ctx) + if err != nil { + return nil, err + } + return cred.DeepCopy(), nil +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go new file mode 100644 index 000000000..8bf61a4ed --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/http.go @@ -0,0 +1,91 @@ +package provider + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type httpError struct { + code int + message string + data []byte +} + +type commonHttpClient struct { + client httpClient + logger Logger +} + +func newCommonHttpClient(transport http.RoundTripper, timeout time.Duration) *commonHttpClient { + client := &http.Client{ + Transport: transport, + Timeout: timeout, + } + return &commonHttpClient{client: client} +} + +func (c *commonHttpClient) send(ctx context.Context, method, url string, header http.Header, body io.Reader) (string, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return "", fmt.Errorf("can not init request with url %s: %w", url, err) + } + req = req.WithContext(ctx) + req.Header.Set("User-Agent", UserAgent) + for k, items := range header { + for _, v := range items { + req.Header.Add(k, v) + } + } + + if debugMode { + for _, item := range genDebugReqMessages(req) { + c.getLogger().Debug(item) + } + } + + resp, err := c.client.Do(req) + if err != nil { + return "", fmt.Errorf("request %s failed: %w", url, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + c.getLogger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("read body failed when request %s: %w", url, err) + } + if resp.StatusCode != http.StatusOK { + return "", &httpError{ + code: resp.StatusCode, + message: fmt.Sprintf("status code %d is not 200 when request %s: %s", + resp.StatusCode, url, strings.ReplaceAll(string(data), "\n", " ")), + data: data, + } + } + + return string(data), nil +} + +func (c *commonHttpClient) getLogger() Logger { + if c.logger != nil { + return c.logger + } + return defaultLog +} + +func (e httpError) Error() string { + return e.message +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go new file mode 100644 index 000000000..0b8039806 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/log.go @@ -0,0 +1,44 @@ +package provider + +import ( + "log" + "os" + "strings" +) + +var debugMode bool + +type Logger interface { + Info(msg string) + Debug(msg string) + Error(err error, msg string) +} + +var defaultLog Logger = defaultLogger{} + +func init() { + debugEnv := strings.Split(strings.ToLower(os.Getenv("DEBUG")), ",") + for _, item := range debugEnv { + if item == "sdk" || item == "tea" || item == "credentials-provider" { + debugMode = true + break + } + } +} + +type defaultLogger struct { +} + +func (d defaultLogger) Info(msg string) { + log.Print(msg) +} + +func (d defaultLogger) Debug(msg string) { + if debugMode { + log.Print(msg) + } +} + +func (d defaultLogger) Error(err error, msg string) { + log.Print(msg) +} 
diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go new file mode 100644 index 000000000..62ffc7c70 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/oidc_provider.go @@ -0,0 +1,306 @@ +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +const ( + defaultEnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + defaultEnvOIDCProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + defaultEnvOIDCTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" + + defaultExpiryWindowForAssumeRole = time.Minute * 10 +) + +var ( + defaultSessionName = "default-session-name" + defaultSTSEndpoint = "sts.aliyuncs.com" + defaultSTSScheme = "HTTPS" +) + +type OIDCProvider struct { + u *Updater + + client *http.Client + + stsEndpoint string + stsScheme string + sessionName string + + policy string + durationSeconds string + + roleArn string + oidcProviderArn string + oidcTokenFile string + + Logger Logger +} + +type OIDCProviderOptions struct { + STSEndpoint string + stsScheme string + SessionName string + + TokenDuration time.Duration + Policy string + + RoleArn string + EnvRoleArn string + OIDCProviderArn string + EnvOIDCProviderArn string + OIDCTokenFile string + EnvOIDCTokenFile string + + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func init() { + sessionName := getRoleSessionNameFromEnv() + if sessionName != "" { + defaultSessionName = sessionName + } + if v := getStsEndpointFromEnv(); v != "" { + defaultSTSEndpoint = v + } + if v := getStsHttpSchemeFromEnv(); v != "" { + defaultSTSScheme = strings.ToUpper(v) + } +} + +func NewOIDCProvider(opts OIDCProviderOptions) *OIDCProvider { + opts.applyDefaults() + + client := &http.Client{ + Transport: opts.Transport, + Timeout: opts.Timeout, + } + e := &OIDCProvider{ + client: client, + stsEndpoint: opts.STSEndpoint, + stsScheme: opts.stsScheme, + sessionName: opts.SessionName, + policy: opts.Policy, + roleArn: opts.getRoleArn(), + oidcProviderArn: opts.getOIDCProviderArn(), + oidcTokenFile: opts.getOIDCTokenFile(), + Logger: opts.Logger, + } + if opts.TokenDuration >= time.Second*900 { + ds := int64(opts.TokenDuration.Seconds()) + e.durationSeconds = fmt.Sprintf("%d", ds) + } + + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[OIDCProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (o *OIDCProvider) Credentials(ctx context.Context) (*Credentials, error) { + return o.u.Credentials(ctx) +} + +func (o *OIDCProvider) Stop(ctx context.Context) { + o.u.Stop(ctx) +} + +func (o *OIDCProvider) getCredentials(ctx context.Context) (*Credentials, error) { + roleArn := o.roleArn + oidcProviderArn := o.oidcProviderArn + tokenFile := o.oidcTokenFile + if roleArn == "" || oidcProviderArn == "" || tokenFile == "" { + return nil, NewNotEnableError(errors.New("roleArn, oidcProviderArn or oidcTokenFile is empty")) + } + + tokenData, err := os.ReadFile(tokenFile) + if err != nil { + return nil, err + } + token := string(tokenData) + return o.assumeRoleWithOIDC(ctx, roleArn, oidcProviderArn, token) +} + +type oidcResponse struct { + Credentials *credentialsInResponse `json:"Credentials"` +} + +type credentialsInResponse 
struct { + AccessKeyId string `json:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken"` + Expiration string `json:"Expiration"` +} + +func (o *OIDCProvider) assumeRoleWithOIDC(ctx context.Context, roleArn, oidcProviderArn, token string) (*Credentials, error) { + reqOpts := newCommonRequest() + reqOpts.Domain = o.stsEndpoint + reqOpts.Scheme = o.stsScheme + reqOpts.Method = "POST" + reqOpts.QueryParams["Timestamp"] = getTimeInFormatISO8601() + reqOpts.QueryParams["Action"] = "AssumeRoleWithOIDC" + reqOpts.QueryParams["Format"] = "JSON" + reqOpts.QueryParams["RoleArn"] = roleArn + reqOpts.QueryParams["OIDCProviderArn"] = oidcProviderArn + reqOpts.BodyParams["OIDCToken"] = token + if o.durationSeconds != "" { + reqOpts.QueryParams["DurationSeconds"] = o.durationSeconds + } + if o.policy != "" { + reqOpts.BodyParams["Policy"] = o.policy + } + reqOpts.QueryParams["RoleSessionName"] = o.sessionName + reqOpts.QueryParams["Version"] = "2015-04-01" + reqOpts.QueryParams["SignatureNonce"] = getUUID() + reqOpts.Headers["Accept-Encoding"] = "identity" + reqOpts.Headers["content-type"] = "application/x-www-form-urlencoded" + reqOpts.URL = reqOpts.BuildURL() + + req, err := http.NewRequest(reqOpts.Method, reqOpts.URL, strings.NewReader(getURLFormedMap(reqOpts.BodyParams))) + if err != nil { + return nil, err + } + for k, v := range reqOpts.Headers { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", UserAgent) + req = req.WithContext(ctx) + + if debugMode { + for _, item := range genDebugReqMessages(req) { + o.logger().Debug(item) + } + } + + resp, err := o.client.Do(req) + if err != nil { + return nil, fmt.Errorf("request %s failed: %w", req.URL, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + o.logger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var obj oidcResponse + if err := json.Unmarshal(data, &obj); err != nil { + return nil, err + } + if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("call AssumeRoleWithOIDC failed, got unexpected body: %s", + strings.ReplaceAll(string(data), "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) + if err != nil { + return nil, err + } + return &Credentials{ + AccessKeyId: obj.Credentials.AccessKeyId, + AccessKeySecret: obj.Credentials.AccessKeySecret, + SecurityToken: obj.Credentials.SecurityToken, + Expiration: exp, + }, nil +} + +func (o *OIDCProvider) logger() Logger { + if o.Logger != nil { + return o.Logger + } + return defaultLog +} + +func (o *OIDCProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.STSEndpoint == "" { + o.STSEndpoint = defaultSTSEndpoint + } else { + o.STSEndpoint = strings.TrimRight(o.STSEndpoint, "/") + } + + if strings.HasPrefix(o.STSEndpoint, "https://") { + o.stsScheme = "HTTPS" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "https://") + } else if strings.HasPrefix(o.STSEndpoint, "http://") { + o.stsScheme = "HTTP" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "http://") + } + if o.stsScheme == "" { + o.stsScheme = defaultSTSScheme + } + o.stsScheme = strings.ToUpper(o.stsScheme) + + if o.SessionName == "" { + o.SessionName = defaultSessionName + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = 
defaultExpiryWindowForAssumeRole + if o.TokenDuration > 0 && o.TokenDuration <= o.ExpiryWindow { + o.ExpiryWindow = o.TokenDuration / 2 + } + } + if o.EnvRoleArn == "" { + o.EnvRoleArn = defaultEnvRoleArn + } + if o.EnvOIDCProviderArn == "" { + o.EnvOIDCProviderArn = defaultEnvOIDCProviderArn + } + if o.EnvOIDCTokenFile == "" { + o.EnvOIDCTokenFile = defaultEnvOIDCTokenFile + } + if o.Logger == nil { + o.Logger = defaultLog + } +} + +func (o *OIDCProviderOptions) getRoleArn() string { + if o.RoleArn != "" { + return o.RoleArn + } + return os.Getenv(o.EnvRoleArn) +} + +func (o *OIDCProviderOptions) getOIDCProviderArn() string { + if o.OIDCProviderArn != "" { + return o.OIDCProviderArn + } + return os.Getenv(o.EnvOIDCProviderArn) +} + +func (o *OIDCProviderOptions) getOIDCTokenFile() string { + if o.OIDCTokenFile != "" { + return o.OIDCTokenFile + } + return os.Getenv(o.EnvOIDCTokenFile) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go new file mode 100644 index 000000000..90e8b63bd --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/provider.go @@ -0,0 +1,24 @@ +package provider + +import ( + "context" + "fmt" + "os" + "path" + "runtime" +) + +var UserAgent = "" + +type CredentialsProvider interface { + Credentials(ctx context.Context) (*Credentials, error) +} + +type Stopper interface { + Stop(ctx context.Context) +} + +func init() { + name := path.Base(os.Args[0]) + UserAgent = fmt.Sprintf("%s %s/%s ack-ram-tool/provider/%s", name, runtime.GOOS, runtime.GOARCH, runtime.Version()) +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go new file mode 100644 index 000000000..3d2f0b3c5 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/req.go @@ -0,0 +1,174 @@ +package provider + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "math/rand" + "net/http" + "net/url" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func shaHmac1(source, secret string) string { + key := []byte(secret) + h := hmac.New(sha1.New, key) + h.Write([]byte(source)) + signedBytes := h.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +type commonRequest struct { + Scheme string + Method string + Domain string + RegionId string + URL string + ReadTimeout time.Duration + ConnectTimeout time.Duration + isInsecure *bool + BodyParams map[string]string + userAgent map[string]string + QueryParams map[string]string + Headers map[string]string + + queries string +} + +func newCommonRequest() *commonRequest { + return &commonRequest{ + BodyParams: make(map[string]string), + QueryParams: make(map[string]string), + Headers: make(map[string]string), + } +} + +// BuildURL returns a url +func (request *commonRequest) BuildURL() string { + url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain) + request.queries = "/?" 
+ getURLFormedMap(request.QueryParams) + return url + request.queries +} + +func getURLFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +// BuildStringToSign returns BuildStringToSign +func (request *commonRequest) BuildStringToSign() (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.QueryParams { + signParams[key] = value + } + + for key, value := range request.BodyParams { + signParams[key] = value + } + stringToSign = getURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.Method + "&%2F&" + stringToSign + return +} + +func getTimeInFormatISO8601() (timeStr string) { + return time.Now().UTC().Format("2006-01-02T15:04:05Z") +} + +type uuid [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randStringBytes(n int) string { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[r.Intn(len(letterBytes))] + } + return string(b) +} + +func newUUID() uuid { + ns := uuid{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, randStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = u[8]&(0xff>>2) | (0x02 << 6) + + return u +} + +func newFromHash(h hash.Hash, ns uuid, name string) uuid { + u := uuid{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + if _, err := r.Read(dest); err != nil { + panic(err) + } +} + +func (u uuid) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +func getUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +func genDebugReqMessages(req *http.Request) []string { + var ret []string + ret = append(ret, fmt.Sprintf("%s %s", req.Method, req.URL.String())) + ret = append(ret, "Request Headers:") + for k, vs := range req.Header { + ret = append(ret, fmt.Sprintf(" %s: %s", k, strings.Join(vs, ", "))) + } + return ret +} + +func genDebugRespMessages(resp *http.Response) []string { + var ret []string + ret = append(ret, fmt.Sprintf("Response Status: %s", resp.Status)) + ret = append(ret, "Response Headers:") + for k, vs := range resp.Header { + ret = append(ret, fmt.Sprintf(" %s: %s", k, strings.Join(vs, ", "))) + } + return ret +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go new file mode 100644 index 000000000..7b55ebc9b --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/rolearn_provider.go @@ -0,0 +1,240 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type RoleArnProvider struct { + u *Updater + + client *http.Client + + stsEndpoint 
string + stsScheme string + sessionName string + + policy string + externalId string + durationSeconds string + + roleArn string + cp CredentialsProvider + + Logger Logger +} + +type RoleArnProviderOptions struct { + STSEndpoint string + stsScheme string + SessionName string + + TokenDuration time.Duration + Policy string + ExternalId string + + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewRoleArnProvider(cp CredentialsProvider, roleArn string, opts RoleArnProviderOptions) *RoleArnProvider { + opts.applyDefaults() + + client := &http.Client{ + Transport: opts.Transport, + Timeout: opts.Timeout, + } + e := &RoleArnProvider{ + client: client, + stsEndpoint: opts.STSEndpoint, + stsScheme: opts.stsScheme, + sessionName: opts.SessionName, + policy: opts.Policy, + externalId: opts.ExternalId, + roleArn: roleArn, + cp: cp, + Logger: opts.Logger, + } + if opts.TokenDuration >= time.Second*900 { + ds := int64(opts.TokenDuration.Seconds()) + e.durationSeconds = fmt.Sprintf("%d", ds) + } + + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[RoleArnProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (r *RoleArnProvider) Credentials(ctx context.Context) (*Credentials, error) { + return r.u.Credentials(ctx) +} + +func (r *RoleArnProvider) Stop(ctx context.Context) { + r.u.Stop(ctx) + if s, ok := r.cp.(Stopper); ok { + s.Stop(ctx) + } +} + +func (r *RoleArnProvider) getCredentials(ctx context.Context) (*Credentials, error) { + return r.assumeRole(ctx, r.roleArn) +} + +type roleArnResponse struct { + Credentials *credentialsInResponse `json:"Credentials"` +} + +func (r *RoleArnProvider) assumeRole(ctx context.Context, roleArn string) (*Credentials, error) { + cred, err := r.cp.Credentials(ctx) + if err != nil { + return nil, err + } + + reqOpts := newCommonRequest() + reqOpts.Domain = r.stsEndpoint + reqOpts.Scheme = r.stsScheme + reqOpts.Method = "POST" + reqOpts.QueryParams["Timestamp"] = getTimeInFormatISO8601() + reqOpts.QueryParams["AccessKeyId"] = cred.AccessKeyId + reqOpts.QueryParams["Action"] = "AssumeRole" + reqOpts.QueryParams["Format"] = "JSON" + reqOpts.QueryParams["RoleArn"] = roleArn + if r.durationSeconds != "" { + reqOpts.QueryParams["DurationSeconds"] = r.durationSeconds + } + if r.policy != "" { + reqOpts.BodyParams["Policy"] = r.policy + } + if r.externalId != "" { + reqOpts.QueryParams["ExternalId"] = r.externalId + } + reqOpts.QueryParams["RoleSessionName"] = r.sessionName + reqOpts.QueryParams["SignatureMethod"] = "HMAC-SHA1" + reqOpts.QueryParams["SignatureVersion"] = "1.0" + reqOpts.QueryParams["Version"] = "2015-04-01" + reqOpts.QueryParams["SignatureNonce"] = getUUID() + if cred.SecurityToken != "" { + reqOpts.QueryParams["SecurityToken"] = cred.SecurityToken + } + signature := shaHmac1(reqOpts.BuildStringToSign(), cred.AccessKeySecret+"&") + reqOpts.QueryParams["Signature"] = signature + + reqOpts.Headers["Accept-Encoding"] = "identity" + reqOpts.Headers["content-type"] = "application/x-www-form-urlencoded" + reqOpts.URL = reqOpts.BuildURL() + + req, err := http.NewRequest(reqOpts.Method, reqOpts.URL, nil) + if err != nil { + return nil, err + } + for k, v := range reqOpts.Headers { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", UserAgent) + req = req.WithContext(ctx) + + if debugMode { + for _, item := range genDebugReqMessages(req) { + 
r.logger().Debug(item) + } + } + + resp, err := r.client.Do(req) + if err != nil { + return nil, fmt.Errorf("request %s failed: %w", req.URL, err) + } + defer resp.Body.Close() + + if debugMode { + for _, item := range genDebugRespMessages(resp) { + r.logger().Debug(item) + } + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var obj roleArnResponse + if err := json.Unmarshal(data, &obj); err != nil { + return nil, err + } + if obj.Credentials == nil || obj.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("call AssumeRole failed, got unexpected body: %s", + strings.ReplaceAll(string(data), "\n", " ")) + } + + exp, err := time.Parse("2006-01-02T15:04:05Z", obj.Credentials.Expiration) + if err != nil { + return nil, err + } + return &Credentials{ + AccessKeyId: obj.Credentials.AccessKeyId, + AccessKeySecret: obj.Credentials.AccessKeySecret, + SecurityToken: obj.Credentials.SecurityToken, + Expiration: exp, + }, nil +} + +func (r *RoleArnProvider) logger() Logger { + if r.Logger != nil { + return r.Logger + } + return defaultLog +} + +func (o *RoleArnProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.STSEndpoint == "" { + o.STSEndpoint = defaultSTSEndpoint + } else { + o.STSEndpoint = strings.TrimRight(o.STSEndpoint, "/") + } + + if strings.HasPrefix(o.STSEndpoint, "https://") { + o.stsScheme = "HTTPS" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "https://") + } else if strings.HasPrefix(o.STSEndpoint, "http://") { + o.stsScheme = "HTTP" + o.STSEndpoint = strings.TrimPrefix(o.STSEndpoint, "http://") + } + if o.stsScheme == "" { + o.stsScheme = defaultSTSScheme + } + o.stsScheme = strings.ToUpper(o.stsScheme) + + if o.SessionName == "" { + o.SessionName = defaultSessionName + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindowForAssumeRole + if o.TokenDuration > 0 && o.TokenDuration <= o.ExpiryWindow { + o.ExpiryWindow = o.TokenDuration / 2 + } + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go new file mode 100644 index 000000000..9164dd8b1 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/semaphore_provider.go @@ -0,0 +1,48 @@ +package provider + +import ( + "context" + "fmt" + "golang.org/x/sync/semaphore" +) + +type SemaphoreProvider struct { + weighted *semaphore.Weighted + + cp CredentialsProvider +} + +type SemaphoreProviderOptions struct { + MaxWeight int64 +} + +func NewSemaphoreProvider(cp CredentialsProvider, opts SemaphoreProviderOptions) *SemaphoreProvider { + opts.applyDefaults() + + w := semaphore.NewWeighted(opts.MaxWeight) + return &SemaphoreProvider{ + weighted: w, + cp: cp, + } +} + +func (p *SemaphoreProvider) Credentials(ctx context.Context) (*Credentials, error) { + if err := p.weighted.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquire semaphore: %w", err) + } + defer p.weighted.Release(1) + + return p.cp.Credentials(ctx) +} + +func (o *SemaphoreProviderOptions) applyDefaults() { + if o.MaxWeight <= 0 { + o.MaxWeight = 1 + } +} + +func (p *SemaphoreProvider) Stop(ctx context.Context) { + if s, ok := p.cp.(Stopper); ok { + s.Stop(ctx) + } +} diff --git 
a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go new file mode 100644 index 000000000..fcabdac8f --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/ststoken_provider.go @@ -0,0 +1,35 @@ +package provider + +import ( + "context" + "errors" + "time" +) + +type STSTokenProvider struct { + cred *Credentials +} + +func NewSTSTokenProvider(accessKeyId, accessKeySecret, securityToken string) *STSTokenProvider { + return &STSTokenProvider{ + cred: &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + }, + } +} + +func (a *STSTokenProvider) Credentials(ctx context.Context) (*Credentials, error) { + if a.cred.AccessKeyId == "" || a.cred.AccessKeySecret == "" || a.cred.SecurityToken == "" { + return nil, NewNotEnableError( + errors.New("AccessKeyId, AccessKeySecret or SecurityToken is empty")) + } + + return a.cred.DeepCopy(), nil +} + +func (a *STSTokenProvider) SetExpiration(exp time.Time) *STSTokenProvider { + a.cred.Expiration = exp + return a +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go new file mode 100644 index 000000000..705f436dd --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/updater.go @@ -0,0 +1,206 @@ +package provider + +import ( + "context" + "fmt" + "sync" + "time" +) + +type getCredentialsFunc func(ctx context.Context) (*Credentials, error) + +type Updater struct { + expiryWindow time.Duration + refreshPeriod time.Duration + + // for fix below case: + // * both auth.Signer and credential.Credential are not concurrent safe + expiryWindowForRefreshLoop time.Duration + + getCredentials func(ctx context.Context) (*Credentials, error) + + cred *Credentials + lockForCred sync.RWMutex + + Logger Logger + nowFunc func() time.Time + logPrefix string + + doneCh chan struct{} + stopped bool +} + +type UpdaterOptions struct { + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger + LogPrefix string +} + +func NewUpdater(getter getCredentialsFunc, opts UpdaterOptions) *Updater { + u := &Updater{ + expiryWindow: opts.ExpiryWindow, + refreshPeriod: opts.RefreshPeriod, + expiryWindowForRefreshLoop: opts.RefreshPeriod + opts.RefreshPeriod/2, + getCredentials: getter, + cred: nil, + lockForCred: sync.RWMutex{}, + Logger: opts.Logger, + nowFunc: time.Now, + logPrefix: opts.LogPrefix, + doneCh: make(chan struct{}), + } + return u +} + +func (u *Updater) Start(ctx context.Context) { + if u.refreshPeriod <= 0 { + return + } + + go u.startRefreshLoop(ctx) +} + +func (u *Updater) Stop(shutdownCtx context.Context) { + u.logger().Debug(fmt.Sprintf("%s start to stop...", u.logPrefix)) + + go func() { + u.lockForCred.Lock() + defer u.lockForCred.Unlock() + if u.stopped { + return + } + u.stopped = true + close(u.doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-u.doneCh: + } +} + +func (u *Updater) startRefreshLoop(ctx context.Context) { + ticket := time.NewTicker(u.refreshPeriod) + defer ticket.Stop() + +loop: + for { + select { + case <-ctx.Done(): + break loop + case <-u.doneCh: + break loop + case <-ticket.C: + u.refreshCredForLoop(ctx) + } + } +} + +func (u *Updater) Credentials(ctx context.Context) (*Credentials, 
error) { + if u.Expired() { + if err := u.refreshCred(ctx); err != nil { + return nil, err + } + } + + cred := u.getCred().DeepCopy() + return cred, nil +} + +func (u *Updater) refreshCredForLoop(ctx context.Context) { + exp := u.expiration() + + if !u.expired(u.expiryWindowForRefreshLoop) { + return + } + + u.logger().Debug(fmt.Sprintf("%s start refresh credentials, current expiration: %s", + u.logPrefix, exp.Format("2006-01-02T15:04:05Z"))) + + maxRetry := 5 + for i := 0; i < maxRetry; i++ { + err := u.refreshCred(ctx) + if err == nil { + return + } + if IsNotEnableError(err) { + return + } + if i < maxRetry-1 { + time.Sleep(time.Second * time.Duration(i)) + } + } +} + +func (u *Updater) refreshCred(ctx context.Context) error { + cred, err := u.getCredentials(ctx) + if err != nil { + if IsNotEnableError(err) { + return err + } + u.logger().Error(err, fmt.Sprintf("%s refresh credentials failed: %s", u.logPrefix, err)) + return err + } + u.logger().Debug(fmt.Sprintf("%s refreshed credentials, expiration: %s", + u.logPrefix, cred.Expiration.Format("2006-01-02T15:04:05Z"))) + + u.setCred(cred) + return nil +} + +func (u *Updater) setCred(cred *Credentials) { + u.lockForCred.Lock() + defer u.lockForCred.Unlock() + + newCred := cred.DeepCopy() + newCred.Expiration = newCred.Expiration.Round(0) + if u.expiryWindow > 0 { + newCred.Expiration = newCred.Expiration.Add(-u.expiryWindow) + } + u.cred = newCred +} + +func (u *Updater) getCred() *Credentials { + u.lockForCred.RLock() + defer u.lockForCred.RUnlock() + + return u.cred +} + +func (u *Updater) Expired() bool { + return u.expired(0) +} + +func (u *Updater) expired(expiryDelta time.Duration) bool { + exp := u.expiration() + if expiryDelta > 0 { + exp = exp.Add(-expiryDelta) + } + + return exp.Before(u.now()) +} + +func (u *Updater) expiration() time.Time { + cred := u.getCred() + + if cred == nil { + return time.Time{} + } + + return cred.Expiration.Round(0) +} + +func (u *Updater) now() time.Time { + if u.nowFunc == nil { + return time.Now() + } + return u.nowFunc() +} + +func (u *Updater) logger() Logger { + if u.Logger != nil { + return u.Logger + } + return defaultLog +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go new file mode 100644 index 000000000..962e728e7 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/uri_provider.go @@ -0,0 +1,117 @@ +package provider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +type URIProvider struct { + u *Updater + + url string + + client *commonHttpClient + Logger Logger +} + +type URIProviderOptions struct { + Timeout time.Duration + Transport http.RoundTripper + + ExpiryWindow time.Duration + RefreshPeriod time.Duration + Logger Logger +} + +func NewURIProvider(url string, opts URIProviderOptions) *URIProvider { + opts.applyDefaults() + + client := newCommonHttpClient(opts.Transport, opts.Timeout) + client.logger = opts.Logger + + e := &URIProvider{ + url: url, + client: client, + Logger: opts.Logger, + } + e.u = NewUpdater(e.getCredentials, UpdaterOptions{ + ExpiryWindow: opts.ExpiryWindow, + RefreshPeriod: opts.RefreshPeriod, + Logger: opts.Logger, + LogPrefix: "[URIProvider]", + }) + e.u.Start(context.TODO()) + + return e +} + +func (e *URIProvider) Credentials(ctx context.Context) (*Credentials, error) { + return e.u.Credentials(ctx) +} + 
+func (e *URIProvider) Stop(ctx context.Context) { + e.u.Stop(ctx) +} + +func (e *URIProvider) getCredentials(ctx context.Context) (*Credentials, error) { + if e.url == "" { + return nil, NewNotEnableError(errors.New("URL is empty")) + } + + data, err := e.client.send(ctx, http.MethodGet, e.url, http.Header{}, nil) + if err != nil { + return nil, err + } + + var obj ecsMetadataStsResponse + if err := json.Unmarshal([]byte(data), &obj); err != nil { + return nil, fmt.Errorf("parse credentials failed: %w", err) + } + if obj.AccessKeyId == "" || obj.AccessKeySecret == "" { + return nil, fmt.Errorf("parse credentials got unexpected data: %s", + strings.ReplaceAll(data, "\n", " ")) + } + + var exp time.Time + if obj.Expiration != "" { + exp, err = time.Parse("2006-01-02T15:04:05Z", obj.Expiration) + if err != nil { + return nil, fmt.Errorf("parse Expiration failed: %w", err) + } + } + + return &Credentials{ + AccessKeyId: obj.AccessKeyId, + AccessKeySecret: obj.AccessKeySecret, + SecurityToken: obj.SecurityToken, + Expiration: exp, + }, nil +} + +func (e *URIProvider) logger() Logger { + if e.Logger != nil { + return e.Logger + } + return defaultLog +} + +func (o *URIProviderOptions) applyDefaults() { + if o.Timeout <= 0 { + o.Timeout = defaultClientTimeout + } + if o.Transport == nil { + ts := http.DefaultTransport.(*http.Transport).Clone() + o.Transport = ts + } + if o.ExpiryWindow == 0 { + o.ExpiryWindow = defaultExpiryWindow + } + if o.Logger == nil { + o.Logger = defaultLog + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go new file mode 100644 index 000000000..fd2981cc0 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v1sdk.go @@ -0,0 +1,92 @@ +package provider + +import ( + "context" + "fmt" + "time" +) + +type SignerForV1SDK struct { + p CredentialsProvider + Logger Logger + credentialRetrievalTimeout time.Duration +} + +type SignerForV1SDKOptions struct { + Logger Logger + CredentialRetrievalTimeout time.Duration +} + +func NewSignerForV1SDK(p CredentialsProvider, opts SignerForV1SDKOptions) *SignerForV1SDK { + opts.applyDefaults() + + return &SignerForV1SDK{ + p: p, + Logger: opts.Logger, + credentialRetrievalTimeout: opts.CredentialRetrievalTimeout, + } +} + +func (s *SignerForV1SDK) GetName() string { + return "HMAC-SHA1" +} + +func (s *SignerForV1SDK) GetType() string { + return "" +} + +func (s *SignerForV1SDK) GetVersion() string { + return "1.0" +} + +func (s *SignerForV1SDK) GetAccessKeyId() (string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + cred, err := s.p.Credentials(timeoutCtx) + if err != nil { + return "", err + } + return cred.AccessKeyId, nil +} + +func (s *SignerForV1SDK) GetExtraParam() map[string]string { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + cred, err := s.p.Credentials(timeoutCtx) + if err != nil { + s.logger().Error(err, fmt.Sprintf("get credentials failed: %s", err)) + return nil + } + if cred.SecurityToken != "" { + return map[string]string{"SecurityToken": cred.SecurityToken} + } + return nil +} + +func (s *SignerForV1SDK) Sign(stringToSign, secretSuffix string) string { + timeoutCtx, cancel := context.WithTimeout(context.Background(), s.credentialRetrievalTimeout) + defer cancel() + cred, err := 
s.p.Credentials(timeoutCtx) + if err != nil { + s.logger().Error(err, fmt.Sprintf("get credentials failed: %s", err)) + return "" + } + secret := cred.AccessKeySecret + secretSuffix + return shaHmac1(stringToSign, secret) +} + +func (s *SignerForV1SDK) logger() Logger { + if s.Logger != nil { + return s.Logger + } + return defaultLog +} + +func (o *SignerForV1SDKOptions) applyDefaults() { + if o.Logger == nil { + o.Logger = defaultLog + } + if o.CredentialRetrievalTimeout <= 0 { + o.CredentialRetrievalTimeout = defaultTimeout + } +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go new file mode 100644 index 000000000..713695993 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider/v2sdk.go @@ -0,0 +1,87 @@ +package provider + +import ( + "context" + "time" +) + +var defaultTimeout = time.Minute * 10 + +type CredentialForV2SDK struct { + p CredentialsProvider + Logger Logger + credentialRetrievalTimeout time.Duration +} + +type CredentialForV2SDKOptions struct { + Logger Logger + CredentialRetrievalTimeout time.Duration +} + +func NewCredentialForV2SDK(p CredentialsProvider, opts CredentialForV2SDKOptions) *CredentialForV2SDK { + opts.applyDefaults() + + return &CredentialForV2SDK{ + p: p, + Logger: opts.Logger, + credentialRetrievalTimeout: opts.CredentialRetrievalTimeout, + } +} + +func (c *CredentialForV2SDK) GetAccessKeyId() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.AccessKeyId), nil +} + +func (c *CredentialForV2SDK) GetAccessKeySecret() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.AccessKeySecret), nil +} + +func (c *CredentialForV2SDK) GetSecurityToken() (*string, error) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), c.credentialRetrievalTimeout) + defer cancel() + cred, err := c.p.Credentials(timeoutCtx) + if err != nil { + return nil, err + } + return stringPointer(cred.SecurityToken), nil +} + +func (c *CredentialForV2SDK) GetBearerToken() *string { + return stringPointer("") +} + +func (c *CredentialForV2SDK) GetType() *string { + return stringPointer("CredentialForV2SDK") +} + +func (c *CredentialForV2SDK) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLog +} + +func (o *CredentialForV2SDKOptions) applyDefaults() { + if o.Logger == nil { + o.Logger = defaultLog + } + if o.CredentialRetrievalTimeout <= 0 { + o.CredentialRetrievalTimeout = defaultTimeout + } +} + +func stringPointer(s string) *string { + return &s +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index d13f2e0b3..c03a6ba03 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,20 @@ # Release History +## 1.15.0 (2024-10-14) + +### Features Added + +* `BearerTokenPolicy` handles CAE claims challenges + +### Bugs Fixed + +* Omit the `ResponseError.RawResponse` field from JSON marshaling so 
instances can be marshaled. +* Fixed an integer overflow in the retry policy. + +### Other Changes + +* Update dependencies. + ## 1.14.0 (2024-08-07) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 765fbc684..8ad3d5400 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -5,7 +5,6 @@ package runtime import ( "context" - "encoding/base64" "fmt" "net/http" "strings" @@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ - OnChallenge: p.onChallenge, - OnRequest: p.onRequest, + OnRequest: p.onRequest, }, }) return p } -func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { - challenge := res.Header.Get(shared.HeaderWWWAuthenticate) - claims, err := parseChallenge(challenge) - if err != nil { - // the challenge contains claims we can't parse - return err - } else if claims != "" { - // request a new token having the specified claims, send the request again - return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) - } - // auth challenge didn't include claims, so this is a simple authorization failure - return azruntime.NewResponseError(res) -} - // onRequest authorizes requests with one or more bearer tokens func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { // authorize the request with a token for the primary tenant - err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes}) if err != nil || len(b.auxResources) == 0 { return err } @@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { return b.btp.Do(req) } - -// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token -// that will satisfy conditional access policies. It returns a non-nil error when the given value contains -// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. -func parseChallenge(wwwAuthenticate string) (string, error) { - claims := "" - var err error - for _, param := range strings.Split(wwwAuthenticate, ",") { - if _, after, found := strings.Cut(param, "claims="); found { - if claims != "" { - // The header contains multiple challenges, at least two of which specify claims. The specs allow this - // but it's unclear what a client should do in this case and there's as yet no concrete example of it. 
- err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) - break - } - // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded - claims = strings.Trim(after, `\"=`) - // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" - if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { - claims = string(b) - } else { - err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) - break - } - } - } - return claims, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go index 17bd50c67..03cb227d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" // ResponseError is returned when a request is made to a service and // the service returns a non-success HTTP status code. // Use errors.As() to access this type in the error chain. +// +// When marshaling instances, the RawResponse field will be omitted. +// However, the contents returned by Error() will be preserved. type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 08a954587..8aec256bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -117,12 +117,18 @@ type ResponseError struct { StatusCode int // RawResponse is the underlying HTTP response. - RawResponse *http.Response + RawResponse *http.Response `json:"-"` + + errMsg string } // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
func (e *ResponseError) Error() string { + if e.errMsg != "" { + return e.errMsg + } + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} @@ -163,5 +169,33 @@ func (e *ResponseError) Error() string { } fmt.Fprintln(msg, separator) - return msg.String() + e.errMsg = msg.String() + return e.errMsg +} + +// internal type used for marshaling/unmarshaling +type responseError struct { + ErrorCode string `json:"errorCode"` + StatusCode int `json:"statusCode"` + ErrorMessage string `json:"errorMessage"` +} + +func (e ResponseError) MarshalJSON() ([]byte, error) { + return json.Marshal(responseError{ + ErrorCode: e.ErrorCode, + StatusCode: e.StatusCode, + ErrorMessage: e.Error(), + }) +} + +func (e *ResponseError) UnmarshalJSON(data []byte) error { + re := responseError{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + e.ErrorCode = re.ErrorCode + e.StatusCode = re.StatusCode + e.errMsg = re.ErrorMessage + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 7cb8c207e..c1c8984c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.14.0" + Version = "v1.15.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index 8d9845358..bb37a5efb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -161,19 +161,20 @@ type BearerTokenOptions struct { // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. type AuthorizationHandler struct { - // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token - // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, - // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't - // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a - // token from its credential according to its configuration. + // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest + // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request + // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send + // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token + // from its credential according to its configuration. 
OnRequest func(*Request, func(TokenRequestOptions) error) error - // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the - // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible - // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's - // given credential. Implementations that need to perform I/O should use the Request's context, available from - // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, - // the policy will return any 401 response to the client. + // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon + // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle. + // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the + // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given + // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When + // OnChallenge returns nil, the policy will send the Request again. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index cb2a69528..7ed66be38 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -4,9 +4,12 @@ package runtime import ( + "encoding/base64" "errors" "net/http" + "regexp" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -17,6 +20,11 @@ import ( ) // BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle +// additional authentication challenges, or needing more control over authorization, should +// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions]. +// +// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation type BearerTokenPolicy struct { // mainResource is the resource to be retreived using the tenant specified in the credential mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] @@ -51,8 +59,18 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * if opts == nil { opts = &policy.BearerTokenOptions{} } + ah := opts.AuthorizationHandler + if ah.OnRequest == nil { + // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge + // doesn't get a default so the policy can use a nil check to determine whether the caller + // provided an implementation. 
+ ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + // authNZ sets EnableCAE: true in all cases, no need to duplicate that here + return authNZ(policy.TokenRequestOptions{Scopes: scopes}) + } + } return &BearerTokenPolicy{ - authzHandler: opts.AuthorizationHandler, + authzHandler: ah, cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), @@ -63,6 +81,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * // authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { return func(tro policy.TokenRequestOptions) error { + tro.EnableCAE = true as := acquiringResourceState{p: b, req: req, tro: tro} tk, err := b.mainResource.Get(as) if err != nil { @@ -86,12 +105,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } - var err error - if b.authzHandler.OnRequest != nil { - err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) - } else { - err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) - } + err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) if err != nil { return nil, errorinfo.NonRetriableError(err) } @@ -101,17 +115,51 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } + res, err = b.handleChallenge(req, res, false) + return res, err +} + +// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling +// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge. +// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the +// AuthorizationHandler. +func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) { + var err error if res.StatusCode == http.StatusUnauthorized { b.mainResource.Expire() - if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { - if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { - res, err = req.Next() + if res.Header.Get(shared.HeaderWWWAuthenticate) != "" { + caeChallenge, parseErr := parseCAEChallenge(res) + if parseErr != nil { + return res, parseErr + } + switch { + case caeChallenge != nil: + authNZ := func(tro policy.TokenRequestOptions) error { + // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value + // will be empty at time of writing because CAE is the only feature involving claims. If in + // the future some client needs to specify unrelated claims, this function may need to merge + // them with the challenge claims. 
+ tro.Claims = caeChallenge.params["claims"] + return b.authenticateAndAuthorize(req)(tro) + } + err = b.authzHandler.OnRequest(req, authNZ) + if err == nil { + res, err = req.Next() + } + case b.authzHandler.OnChallenge != nil && !recursed: + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + if res, err = req.Next(); err == nil { + res, err = b.handleChallenge(req, res, true) + } + } else { + // don't retry challenge handling errors + err = errorinfo.NonRetriableError(err) + } + default: + // return the response to the pipeline } } } - if err != nil { - err = errorinfo.NonRetriableError(err) - } return res, err } @@ -121,3 +169,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { } return nil } + +// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none). +// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError. +func parseCAEChallenge(res *http.Response) (*authChallenge, error) { + var ( + caeChallenge *authChallenge + err error + ) + for _, c := range parseChallenges(res) { + if c.scheme == "Bearer" { + if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" { + if b, de := base64.StdEncoding.DecodeString(claims); de == nil { + c.params["claims"] = string(b) + caeChallenge = &c + } else { + // don't include the decoding error because it's something + // unhelpful like "illegal base64 data at input byte 42" + err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims)) + } + break + } + } + } + return caeChallenge, err +} + +var ( + challenge, challengeParams *regexp.Regexp + once = &sync.Once{} +) + +type authChallenge struct { + scheme string + params map[string]string +} + +// parseChallenges assumes authentication challenges have quoted parameter values +func parseChallenges(res *http.Response) []authChallenge { + once.Do(func() { + // matches challenges having quoted parameters, capturing scheme and parameters + challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`) + // captures parameter names and values in a match of the above expression + challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`) + }) + parsed := []authChallenge{} + // WWW-Authenticate can have multiple values, each containing multiple challenges + for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) { + for _, sm := range challenge.FindAllStringSubmatch(h, -1) { + // sm is [challenge, scheme, params] (see regexp documentation on submatches) + c := authChallenge{ + params: make(map[string]string), + scheme: sm[1], + } + for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) { + // sm is [key="value", key, value] (see regexp documentation on submatches) + c.params[sm[1]] = sm[2] + } + parsed = append(parsed, c) + } + } + return parsed +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index e15eea824..4c3a31fea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) { } func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - delay := time.Duration((1< o.MaxRetryDelay { + delayFloat := float64(delay) * 
jitterMultiplier + if delayFloat > float64(math.MaxInt64) { + // the jitter pushed us over MaxInt64, so just use MaxInt64 + delay = time.Duration(math.MaxInt64) + } else { + delay = time.Duration(delayFloat) + } + + if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value delay = o.MaxRetryDelay } + return delay } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md new file mode 100644 index 000000000..ea267e4f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -0,0 +1,10 @@ +# Breaking Changes + +## v1.6.0 + +### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios + +As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed +identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint +is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error +response can appear in logs but doesn't indicate authentication failed. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index a8c2feb6d..e35f5ad93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,52 @@ # Release History +## 1.8.0 (2024-10-08) + +### Other Changes +* `AzurePipelinesCredential` sets an additional OIDC request header so that it + receives a 401 instead of a 302 after presenting an invalid system access token +* Allow logging of debugging headers for `AzurePipelinesCredential` and include + them in error messages + +## 1.8.0-beta.3 (2024-09-17) + +### Features Added +* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID` + +### Other Changes +* Removed redundant content from error messages + +## 1.8.0-beta.2 (2024-08-06) + +### Breaking Changes +* `NewManagedIdentityCredential` now returns an error when a user-assigned identity + is specified on a platform whose managed identity API doesn't support that. + `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. + Returning an error instead prevents the credential authenticating an unexpected + identity, causing a client to act with unexpected privileges. The affected + platforms are: + * Azure Arc + * Azure ML (when a resource ID is specified; client IDs are supported) + * Cloud Shell + * Service Fabric + +### Other Changes +* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before + attempting to authenticate a managed identity, it continues to the next credential + in the chain instead of immediately returning an error. + +## 1.8.0-beta.1 (2024-07-17) + +### Features Added +* Restored persistent token caching feature + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Redesigned the persistent caching API. Encryption is now required in all cases + and persistent cache construction is separate from credential construction. + The `PersistentUserAuthentication` example in the package docs has been updated + to demonstrate the new API. 
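
The changelog entry above describes the redesigned persistent-caching API: encryption is required and cache construction is separate from credential construction. As a rough sketch of how those pieces might fit together for user authentication, assuming `cache.New` in the `azidentity/cache` subpackage accepts nil options and returns an `azidentity.Cache`, and with the helper name and record file path being illustrative rather than part of the SDK:

```go
package example

import (
	"context"
	"encoding/json"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

// authenticateUser is an illustrative helper (not part of the vendored code): it wires a
// persistent cache and a previously stored AuthenticationRecord, if any, into
// DeviceCodeCredential so later runs can reuse cached tokens instead of prompting again.
func authenticateUser(ctx context.Context, recordPath string) (*azidentity.DeviceCodeCredential, error) {
	// Construct the persistent cache separately from the credential; New is assumed to
	// return an error when the platform can't provide the required encryption.
	c, err := cache.New(nil)
	if err != nil {
		return nil, err
	}

	// Load an AuthenticationRecord saved by a previous run, if one exists.
	record := azidentity.AuthenticationRecord{}
	if b, readErr := os.ReadFile(recordPath); readErr == nil {
		_ = json.Unmarshal(b, &record) // a zero-valued record just means "no previous login"
	}

	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		AuthenticationRecord: record,
		Cache:                c,
	})
	if err != nil {
		return nil, err
	}

	if record == (azidentity.AuthenticationRecord{}) {
		// First run: prompt the user once, then persist the returned record for next time.
		record, err = cred.Authenticate(ctx, nil)
		if err != nil {
			return nil, err
		}
		if b, marshalErr := json.Marshal(record); marshalErr == nil {
			_ = os.WriteFile(recordPath, b, 0600)
		}
	}
	return cred, nil
}
```

The same shape applies to the other user-interactive credential types listed in this changelog; only the constructor and options type change.
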
+ ## 1.7.0 (2024-06-20) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 7e201ea2f..96f30b25c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,7 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: +`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: ![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) @@ -126,12 +126,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ## Credential Types -### Authenticating Azure Hosted Applications +### Credential chains |Credential|Usage |-|- |[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps |[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials + +### Authenticating Azure-Hosted Applications + +|Credential|Usage +|-|- |[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables |[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource |[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes @@ -158,7 +163,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI -|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI +|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index fbaa29220..e0bd09c63 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -1,57 +1,40 @@ ## Token caching in the Azure Identity client module -*Token caching* is a feature provided by the Azure Identity library that allows apps to: +Token caching helps apps: - Improve their resilience and performance. -- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. -- Reduce the number of times the user is prompted to authenticate. +- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times users are prompted to authenticate. -When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use. -Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. +#### Caching can't be disabled -### In-memory token caching - -*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. - -**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. +Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. 
This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances. -#### Caching cannot be disabled +### In-memory token caching -As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. +Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache. ### Persistent token caching -> Only azidentity v1.5.0-beta versions support persistent token caching +Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs. -*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. +Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Storage mechanism | +| Operating system | Encryption facility | |------------------|---------------------------------------| | Linux | kernel key retention service (keyctl) | | macOS | Keychain | -| Windows | DPAPI | - -By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. -However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. +| Windows | Data Protection API (DPAPI) | -With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: - -- Makes the app more resilient to failures. -- Ensures the app can continue to function during an Entra ID outage or disruption. -- Avoids having to prompt users to authenticate each time the process is restarted. - ->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. 
- -#### Example code - -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. +Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. ### Credentials supporting token caching The following table indicates the state of in-memory and persistent caching in each credential type. -**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). +**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | |--------------------------------|---------------------------------------------------------------------|--------------------------| @@ -66,6 +49,9 @@ The following table indicates the state of in-memory and persistent caching in e | `EnvironmentCredential` | Supported | Not Supported | | `InteractiveBrowserCredential` | Supported | Supported | | `ManagedIdentityCredential` | Supported | Not Supported | -| `OnBehalfOfCredential` | Supported | Supported | +| `OnBehalfOfCredential` | Supported | Not Supported | | `UsernamePasswordCredential` | Supported | Supported | | `WorkloadIdentityCredential` | Supported | Supported | + +[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication +[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 54016a070..c24f67e84 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -234,7 +234,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul |---|---|---| | AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.| | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. 
It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| -|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| +|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| ## Get additional help diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index bff0c44da..045f87acd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_087379b475" + "Tag": "go/azidentity_c55452bbf6" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go index ada4d6501..840a71469 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -18,10 +18,10 @@ import ( var supportedAuthRecordVersions = []string{"1.0"} -// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as // [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication -// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. -type authenticationRecord struct { +// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user. +type AuthenticationRecord struct { // Authority is the URL of the authority that issued the token. Authority string `json:"authority"` @@ -42,11 +42,11 @@ type authenticationRecord struct { } // UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord -func (a *authenticationRecord) UnmarshalJSON(b []byte) error { +func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error { // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally // different type enables this by assigning all the fields without recursing into this method. 
- type r authenticationRecord + type r AuthenticationRecord err := json.Unmarshal(b, (*r)(a)) if err != nil { return err @@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error { } // account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. -func (a *authenticationRecord) account() public.Account { +func (a *AuthenticationRecord) account() public.Account { return public.Account{ Environment: a.Authority, HomeAccountID: a.HomeAccountID, @@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account { } } -func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { +func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) { u, err := url.Parse(ar.IDToken.Issuer) if err != nil { - return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) } tenant := ar.IDToken.TenantID if tenant == "" { @@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) if username == "" { username = ar.IDToken.UPN } - return authenticationRecord{ + return AuthenticationRecord{ Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), ClientID: ar.IDToken.Audience, HomeAccountID: ar.Account.HomeAccountID, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index b0965036b..ce55dc658 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -53,8 +53,14 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -// tokenCachePersistenceOptions contains options for persistent token caching -type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache = internal.Cache // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. 
cloud.Configuration.ActiveDirectoryAuthorityHost value set by user diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go index 80c1806bb..a4b8ab6f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -20,6 +20,8 @@ const ( credNameAzurePipelines = "AzurePipelinesCredential" oidcAPIVersion = "7.1" systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" + xMsEdgeRef = "x-msedge-ref" + xVssE2eId = "x-vss-e2eid" ) // AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See @@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system if options == nil { options = &AzurePipelinesCredentialOptions{} } + // these headers are useful to the DevOps team when debugging OIDC error responses + options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId) caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } @@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID url, err := runtime.EncodeQueryParams(url) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil) } req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil) } req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + // instruct endpoint to return 401 instead of 302, if the system access token is invalid + req.Header.Set("X-TFS-FedAuthRedirect", "Suppress") res, err := doForClient(a.cred.client.azClient, req) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil) } if res.StatusCode != 
http.StatusOK { - msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration." + for _, h := range []string{xMsEdgeRef, xVssE2eId} { + if v := res.Header.Get(h); v != "" { + msg += fmt.Sprintf("\n%s: %s", h, v) + } + } // include the response because its body, if any, probably contains an error message. // OK responses aren't included with errors because they probably contain secrets - return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res) } b, err := runtime.Payload(res) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil) } var r struct { OIDCToken string `json:"oidcToken"` } err = json.Unmarshal(b, &r) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil) } return r.OIDCToken, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 6c35a941b..2460f66ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -113,11 +113,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token if err != nil { // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise msg := createChainedErrorMessage(errs) - if errors.As(err, &unavailableErr) { + var authFailedErr *AuthenticationFailedError + switch { + case errors.As(err, &authFailedErr): + err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse) + if af, ok := err.(*AuthenticationFailedError); ok { + // stop Error() printing the response again; it's already in msg + af.omitResponse = true + } + case errors.As(err, &unavailableErr): err = newCredentialUnavailableError(c.name, msg) - } else { + default: res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, msg, res, err) + err = newAuthenticationFailedError(c.name, msg, res) } } return token, err @@ -126,7 +134,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token func createChainedErrorMessage(errs []error) string { msg := "failed to acquire a token.\nAttempted credentials:" for _, err := range errs { - msg += fmt.Sprintf("\n\t%s", err.Error()) + msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t")) } return msg } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 4cd8c5144..62c12b546 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,16 +26,27 @@ extends: parameters: CloudConfig: Public: + ServiceConnection: azure-sdk-tests + SubscriptionConfigurationFilePaths: + - 
eng/common/TestResources/sub-config/AzurePublicMsft.json SubscriptionConfigurations: - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) - EnvVars: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) + EnableRaceDetector: true RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + PreSteps: + - task: AzureCLI@2 + displayName: Set OIDC token + inputs: + addSpnToEnvironment: true + azureSubscription: azure-sdk-tests + inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)" + scriptLocation: inlineScript + scriptType: pscore MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index b588750ef..2307da86f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. @@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 80cd96b56..9e6bca1c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct { // application is registered. 
AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct { // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. @@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 9e6772e9b..f0890fe1e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct { // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache } // ClientSecretCredential authenticates an application with a client secret. 
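
For service principal credential types such as the ones in these hunks, the new exported `Cache` option replaces the former unexported `tokenCachePersistenceOptions`. A minimal sketch of opting in, assuming `cache.New` from the `azidentity/cache` subpackage and placeholder tenant, client, and secret values supplied by the caller:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

// newCachedSecretCredential is an illustrative helper (not part of the vendored code):
// construct the persistent cache first, then pass it to the credential via the options'
// Cache field so acquired tokens are shared across processes and credential instances.
func newCachedSecretCredential(tenantID, clientID, secret string) (*azidentity.ClientSecretCredential, error) {
	c, err := cache.New(nil) // assumed to fail when platform encryption is unavailable
	if err != nil {
		return nil, err
	}
	return azidentity.NewClientSecretCredential(tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{
		Cache: c,
	})
}
```
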
@@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 3bd08c685..7059a510c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -29,8 +29,8 @@ type confidentialClientOptions struct { AdditionallyAllowedTenants []string // Assertion for on-behalf-of authentication Assertion string + Cache Cache DisableInstanceDiscovery, SendX5C bool - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -107,12 +107,12 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque } } if err != nil { - // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. - // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. - var unavailableErr credentialUnavailable - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, err.Error(), res, err) + var ( + authFailedErr *AuthenticationFailedError + unavailableErr credentialUnavailable + ) + if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) { + err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) @@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { - cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + cache, err := internal.ExportReplace(c.opts.Cache, enableCAE) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 551d31994..3cfc0f7bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -36,10 +36,13 @@ type DefaultAzureCredentialOptions struct { TenantID string } -// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. -// It combines credentials suitable for deployment with credentials suitable for local development. 
-// It attempts to authenticate with each of these credential types, in the following order, stopping -// when one provides a token: +// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by +// combining credentials used in Azure hosting environments and credentials used in local development. In +// production, it's better to use a specific credential type so authentication is more predictable and easier +// to debug. +// +// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, +// stopping when one provides a token: // // - [EnvironmentCredential] // - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index cd30bedd5..53c4c7287 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -25,18 +25,26 @@ type DeviceCodeCredentialOptions struct { // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -49,9 +57,6 @@ type DeviceCodeCredentialOptions struct { // applications. TenantID string - // tokenCachePersistenceOptions enables persistent token caching when not nil. 
- tokenCachePersistenceOptions *tokenCachePersistenceOptions - // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -101,12 +106,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, DeviceCodePrompt: cp.UserPrompt, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - Record: cp.authenticationRecord, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + Record: cp.AuthenticationRecord, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -116,8 +121,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate prompts a user to log in via the device code flow. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 35fa01d13..b05cb035a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -38,18 +38,30 @@ type AuthenticationFailedError struct { // RawResponse is the HTTP response motivating the error, if available. RawResponse *http.Response - credType string - message string - err error + credType, message string + omitResponse bool } -func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { - return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +func newAuthenticationFailedError(credType, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error. +// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error +// message, because that message is redundant given the response. If the original error isn't a +// CallErr, the returned error incorporates its message. +func newAuthenticationFailedErrorFromMSAL(credType string, err error) error { + msg := "" + res := getResponseFromError(err) + if res == nil { + msg = err.Error() + } + return newAuthenticationFailedError(credType, msg, res) } // Error implements the error interface. 
Note that the message contents are not contractual and can change over time. func (e *AuthenticationFailedError) Error() string { - if e.RawResponse == nil { + if e.RawResponse == nil || e.omitResponse { return e.credType + ": " + e.message } msg := &bytes.Buffer{} @@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprintln(msg, "Request information not available") } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") body, err := runtime.Payload(e.RawResponse) switch { @@ -109,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() { var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) -// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token // because the credential requires user interaction and is configured not to request it automatically. -type authenticationRequiredError struct { +type AuthenticationRequiredError struct { credentialUnavailableError // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. TokenRequestOptions policy.TokenRequestOptions } -func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { - return &authenticationRequiredError{ +func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &AuthenticationRequiredError{ credentialUnavailableError: credentialUnavailableError{ credType + " can't acquire a token without user interaction. 
Call Authenticate to authenticate a user interactively", }, @@ -128,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti } var ( - _ credentialUnavailable = (*authenticationRequiredError)(nil) - _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) + _ credentialUnavailable = (*AuthenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil) ) type credentialUnavailable interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum deleted file mode 100644 index c592f283b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ /dev/null @@ -1,60 +0,0 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 056785a8a..848db16e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -24,18 +24,26 @@ type InteractiveBrowserCredentialOptions struct { // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. 
- authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -54,9 +62,6 @@ type InteractiveBrowserCredentialOptions struct { // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -82,13 +87,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, LoginHint: cp.LoginHint, - Record: cp.authenticationRecord, + Record: cp.AuthenticationRecord, RedirectURL: cp.RedirectURL, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -97,8 +102,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate opens the default browser so a user can log in. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. 
+func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go new file mode 100644 index 000000000..c0cfe7606 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache struct { + // impl is a pointer so a Cache can carry persistent state across copies + impl *impl +} + +// impl is a Cache's private implementation +type impl struct { + // factory constructs storage implementations + factory func(bool) (cache.ExportReplace, error) + // cae and noCAE are previously constructed storage implementations. CAE + // and non-CAE tokens must be stored separately because MSAL's cache doesn't + // observe token claims. If a single storage implementation held both kinds + // of tokens, it could create a reauthentication or error loop by returning + // a non-CAE token lacking a required claim. + cae, noCAE cache.ExportReplace + // mu synchronizes around cae and noCAE + mu *sync.RWMutex +} + +func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) { + if i == nil { + // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory + return nil, nil + } + var ( + err error + xr cache.ExportReplace + ) + i.mu.RLock() + xr = i.cae + if !cae { + xr = i.noCAE + } + i.mu.RUnlock() + if xr != nil { + return xr, nil + } + i.mu.Lock() + defer i.mu.Unlock() + if cae { + if i.cae == nil { + if xr, err = i.factory(cae); err == nil { + i.cae = xr + } + } + return i.cae, err + } + if i.noCAE == nil { + if xr, err = i.factory(cae); err == nil { + i.noCAE = xr + } + } + return i.noCAE, err +} + +// NewCache is the constructor for Cache. It takes a factory instead of an instance +// because it doesn't know whether the Cache will store both CAE and non-CAE tokens. +func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache { + return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}} +} + +// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface. +// It's a function instead of a method on Cache so packages in azidentity and +// azidentity/cache can call it while applications can't. "cae" declares whether the +// caller intends this implementation to store CAE tokens. 
+func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) { + return c.impl.exportReplace(cae) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go deleted file mode 100644 index b1b4d5c8b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -// TokenCachePersistenceOptions contains options for persistent token caching -type TokenCachePersistenceOptions struct { - // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text - // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts - // encryption before falling back to plaintext storage. - AllowUnencryptedStorage bool - - // Name identifies the cache. Set this to isolate data from other applications. - Name string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go deleted file mode 100644 index c1498b464..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "errors" - - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" -) - -var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") - -// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to -// use a persistent cache must first import the cache module, which will replace this function -// with a platform-specific implementation. -var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { - if o == nil { - return nil, nil - } - return nil, errMissingImport -} - -// CacheFilePath returns the path to the cache file for the given name. -// Defining it in this package makes it available to azidentity tests. 
-var CacheFilePath = func(name string) (string, error) { - return "", errMissingImport -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 6122cc700..4c657a92e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -143,6 +143,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if endpoint, ok := os.LookupEnv(identityEndpoint); ok { if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { + if options.ID != nil { + return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Service Fabric" c.endpoint = endpoint c.msiType = msiTypeServiceFabric @@ -152,6 +155,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAppService } } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + if options.ID != nil { + return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Azure Arc" c.endpoint = endpoint c.msiType = msiTypeAzureArc @@ -159,9 +165,15 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { c.endpoint = endpoint if _, ok := os.LookupEnv(msiSecret); ok { + if options.ID != nil && options.ID.idKind() != miClientID { + return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only") + } env = "Azure ML" c.msiType = msiTypeAzureML } else { + if options.ID != nil { + return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities") + } env = "Cloud Shell" c.msiType = msiTypeCloudShell } @@ -207,9 +219,10 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) - if err == nil { - _, err = c.azClient.Pipeline().Do(req) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } + res, err := c.azClient.Pipeline().Do(req) if err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { @@ -217,7 +230,16 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } - // send normal token requests from now on because something responded + // because IMDS always responds with JSON, assume a non-JSON response is from something else, such + // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating + b, err := azruntime.Payload(res) + if err != nil { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err)) + } + if !json.Valid(b) { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe") + } + // send normal token requests from now on because IMDS responded c.probeIMDS = false } @@ -228,7 +250,7 @@ func (c 
*managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi resp, err := c.azClient.Pipeline().Do(msg) if err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { @@ -239,7 +261,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi switch resp.StatusCode { case http.StatusBadRequest: if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { @@ -256,7 +278,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) } func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { @@ -284,10 +306,10 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac if expiresOn, err := strconv.Atoi(v); err == nil { return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res) default: msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) } } @@ -302,15 +324,15 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage key, err := c.getAzureArcSecretKey(ctx, scopes) if err != nil { msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) - return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) } - return c.createAzureArcAuthRequest(ctx, id, scopes, key) + return c.createAzureArcAuthRequest(ctx, scopes, key) case msiTypeAzureML: return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: - return c.createServiceFabricAuthRequest(ctx, id, scopes) + return c.createServiceFabricAuthRequest(ctx, scopes) case msiTypeCloudShell: - return c.createCloudShellAuthRequest(ctx, id, scopes) + return c.createCloudShellAuthRequest(ctx, scopes) default: return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") } @@ -323,13 +345,16 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", imdsAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) + q.Set("api-version", 
imdsAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) if id != nil { - if id.idKind() == miResourceID { - q.Add(msiResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("object_id", id.String()) + case miResourceID: + q.Set(msiResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -343,13 +368,16 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) q := request.Raw().URL.Query() - q.Add("api-version", "2019-08-01") - q.Add("resource", scopes[0]) + q.Set("api-version", "2019-08-01") + q.Set("resource", scopes[0]) if id != nil { - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("principal_id", id.String()) + case miResourceID: + q.Set(miResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -363,23 +391,24 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } request.Raw().Header.Set("secret", os.Getenv(msiSecret)) q := request.Raw().URL.Query() - q.Add("api-version", "2017-09-01") - q.Add("resource", strings.Join(scopes, " ")) - q.Add("clientid", os.Getenv(defaultIdentityClientID)) + q.Set("api-version", "2017-09-01") + q.Set("resource", strings.Join(scopes, " ")) + q.Set("clientid", os.Getenv(defaultIdentityClientID)) if id != nil { - if id.idKind() == miResourceID { - log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") - q.Set("clientid", "") - q.Set(miResID, id.String()) - } else { + switch id.idKind() { + case miClientID: q.Set("clientid", id.String()) + case miObjectID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil) + case miResourceID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil) } } request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -387,16 +416,8 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte q := request.Raw().URL.Query() request.Raw().Header.Set("Accept", "application/json") request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) - q.Add("api-version", serviceFabricAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", serviceFabricAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } @@ -409,8 +430,8 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } 
request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key response, err := c.azClient.Pipeline().Do(request) @@ -421,39 +442,39 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour // of the secret key file. Any other status code indicates an error in the request. if response.StatusCode != 401 { msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) - return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key _, p, found := strings.Cut(header, "=") if !found { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil) } expected, err := arcKeyDirectory() if err != nil { return "", err } if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil) } f, err := os.Stat(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil) } if s := f.Size(); s > 4096 { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil) } key, err := os.ReadFile(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil) } return string(key), nil } -func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -461,21 +482,13 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i request.Raw().Header.Set(headerMetadata, "true") request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) q := request.Raw().URL.Query() - q.Add("api-version", 
azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err @@ -488,14 +501,5 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { return nil, err } - if id != nil { - log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") - q := request.Raw().URL.Query() - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } return request, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 13c043d8e..1d53579cf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -22,8 +22,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential" type managedIdentityIDKind int const ( - miClientID managedIdentityIDKind = 0 - miResourceID managedIdentityIDKind = 1 + miClientID managedIdentityIDKind = iota + miObjectID + miResourceID ) // ManagedIDKind identifies the ID of a managed identity as either a client or resource ID @@ -32,7 +33,12 @@ type ManagedIDKind interface { idKind() managedIdentityIDKind } -// ClientID is the client ID of a user-assigned managed identity. +// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when a ClientID is specified on the following platforms: +// +// - Azure Arc +// - Cloud Shell +// - Service Fabric type ClientID string func (ClientID) idKind() managedIdentityIDKind { @@ -44,7 +50,31 @@ func (c ClientID) String() string { return string(c) } -// ResourceID is the resource ID of a user-assigned managed identity. +// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when an ObjectID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric +type ObjectID string + +func (ObjectID) idKind() managedIdentityIDKind { + return miObjectID +} + +// String returns the string value of the ID. +func (o ObjectID) String() string { + return string(o) +} + +// ResourceID is the resource ID of a user-assigned managed identity. 
[NewManagedIdentityCredential] +// returns an error when a ResourceID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric type ResourceID string func (ResourceID) idKind() managedIdentityIDKind { @@ -60,9 +90,10 @@ func (r ResourceID) String() string { type ManagedIdentityCredentialOptions struct { azcore.ClientOptions - // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity - // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that - // some platforms don't accept resource IDs. + // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of + // the hosting environment's default. The value may be the identity's client, object, or resource ID. + // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed + // identities, or the specified kind of ID. ID ManagedIDKind // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have @@ -73,10 +104,11 @@ type ManagedIdentityCredentialOptions struct { dac bool } -// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview +// user-assigned identity. 
+// +// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index b3d22dbf3..73363e1c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -30,12 +30,12 @@ type publicClientOptions struct { azcore.ClientOptions AdditionallyAllowedTenants []string + Cache Cache DeviceCodePrompt func(context.Context, DeviceCodeMessage) error DisableAutomaticAuthentication bool DisableInstanceDiscovery bool LoginHint, RedirectURL string - Record authenticationRecord - TokenCachePersistenceOptions *tokenCachePersistenceOptions + Record AuthenticationRecord Username, Password string } @@ -48,7 +48,7 @@ type publicClient struct { host string name string opts publicClientOptions - record authenticationRecord + record AuthenticationRecord azClient *azcore.Client } @@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p }, nil } -func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) { if tro == nil { tro = &policy.TokenRequestOptions{} } if len(tro.Scopes) == 0 { if p.defaultScope == nil { - return authenticationRecord{}, errScopeRequired + return AuthenticationRecord{}, errScopeRequired } tro.Scopes = p.defaultScope } client, mu, err := p.client(*tro) if err != nil { - return authenticationRecord{}, err + return AuthenticationRecord{}, err } mu.Lock() defer mu.Unlock() @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) + return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { @@ -222,13 +222,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { - cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + c, err := internal.ExportReplace(p.opts.Cache, enableCAE) if err != nil { return nil, err } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithCache(cache), + public.WithCache(c), public.WithHTTPClient(p), } if enableCAE { @@ -244,8 +244,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke if err == nil { p.record, err = newAuthenticationRecord(ar) } else { - res := getResponseFromError(err) - err = newAuthenticationFailedError(p.name, err.Error(), res, err) + err = newAuthenticationFailedErrorFromMSAL(p.name, err) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index a69bbce34..1a07fede6 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -5,7 +5,19 @@ param ( [hashtable] $AdditionalParameters = @{}, - [hashtable] $DeploymentOutputs + [hashtable] $DeploymentOutputs, + + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $TenantId, + + [Parameter()] + [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] + [string] $TestApplicationId, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments ) $ErrorActionPreference = 'Stop' @@ -16,14 +28,14 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] } Write-Host "Building container" $image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" -FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder ENV GOARCH=amd64 GOWORK=off COPY . /azidentity WORKDIR /azidentity/testdata/managed-id-test @@ -53,9 +65,11 @@ az container create -g $rg -n $aciName --image $image ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` - AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` - AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` - FUNCTIONS_CUSTOMHANDLER_PORT=80 + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index 2a2165293..135feb017 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' value: deployResources ? usermgdid.id : null } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID' + value: deployResources ? usermgdid.properties.clientId : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID' + value: deployResources ? 
usermgdid.properties.principalId : null + } { name: 'AzureWebJobsStorage' value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' @@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 294ed81e9..740abd470 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -25,18 +25,20 @@ type UsernamePasswordCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, @@ -54,13 +56,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Record: options.authenticationRecord, - TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.AuthenticationRecord, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -70,7 +72,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st } // Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 4305b5d3d..4fa22dcc1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.7.0" + version = "v1.8.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 3e43e788e..6fecada2f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string @@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md new file mode 100644 index 000000000..9515ee520 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md @@ -0,0 +1,3 @@ +# GCP Resource detection library + +This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments. 
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go new file mode 100644 index 000000000..0a3680703 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go @@ -0,0 +1,76 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +const ( + // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules + // for the environment variables available in GAE environments. + gaeServiceEnv = "GAE_SERVICE" + gaeVersionEnv = "GAE_VERSION" + gaeInstanceEnv = "GAE_INSTANCE" + gaeEnv = "GAE_ENV" + gaeStandard = "standard" +) + +func (d *Detector) onAppEngineStandard() bool { + // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables. + env, found := d.os.LookupEnv(gaeEnv) + return found && env == gaeStandard +} + +func (d *Detector) onAppEngine() bool { + _, found := d.os.LookupEnv(gaeServiceEnv) + return found +} + +// AppEngineServiceName returns the service name of the app engine service. +func (d *Detector) AppEngineServiceName() (string, error) { + if name, found := d.os.LookupEnv(gaeServiceEnv); found { + return name, nil + } + return "", errEnvVarNotFound +} + +// AppEngineServiceVersion returns the service version of the app engine service. +func (d *Detector) AppEngineServiceVersion() (string, error) { + if version, found := d.os.LookupEnv(gaeVersionEnv); found { + return version, nil + } + return "", errEnvVarNotFound +} + +// AppEngineServiceInstance returns the service instance of the app engine service. +func (d *Detector) AppEngineServiceInstance() (string, error) { + if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found { + return instanceID, nil + } + return "", errEnvVarNotFound +} + +// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running. +func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) { + // The GCE metadata server is available on App Engine Flex. + return d.GCEAvailabilityZoneAndRegion() +} + +// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in. +func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) { + return d.metadata.Zone() +} + +// AppEngineStandardCloudRegion returns the region the app engine service is running in. 
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) { + return d.FaaSCloudRegion() +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go new file mode 100644 index 000000000..d3992a4f7 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +const ( + bmsProjectIDEnv = "BMS_PROJECT_ID" + bmsRegionEnv = "BMS_REGION" + bmsInstanceIDEnv = "BMS_INSTANCE_ID" +) + +// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying +// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables. +// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs +func (d *Detector) onBareMetalSolution() bool { + projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv) + region, regionExists := d.os.LookupEnv(bmsRegionEnv) + instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv) + return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != "" +} + +// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable. +func (d *Detector) BareMetalSolutionInstanceID() (string, error) { + if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found { + return instanceID, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable. +func (d *Detector) BareMetalSolutionCloudRegion() (string, error) { + if region, found := d.os.LookupEnv(bmsRegionEnv); found { + return region, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable. +func (d *Detector) BareMetalSolutionProjectID() (string, error) { + if project, found := d.os.LookupEnv(bmsProjectIDEnv); found { + return project, nil + } + return "", errEnvVarNotFound +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go new file mode 100644 index 000000000..2cc62de09 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go @@ -0,0 +1,102 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "errors" + "os" + + "cloud.google.com/go/compute/metadata" +) + +var errEnvVarNotFound = errors.New("environment variable not found") + +// NewDetector returns a *Detector which can get detect the platform, +// and fetch attributes of the platform on which it is running. +func NewDetector() *Detector { + return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}} +} + +type Platform int64 + +const ( + UnknownPlatform Platform = iota + GKE + GCE + CloudRun + CloudRunJob + CloudFunctions + AppEngineStandard + AppEngineFlex + BareMetalSolution +) + +// CloudPlatform returns the platform on which this program is running. +func (d *Detector) CloudPlatform() Platform { + switch { + case d.onBareMetalSolution(): + return BareMetalSolution + case d.onGKE(): + return GKE + case d.onCloudFunctions(): + return CloudFunctions + case d.onCloudRun(): + return CloudRun + case d.onCloudRunJob(): + return CloudRunJob + case d.onAppEngineStandard(): + return AppEngineStandard + case d.onAppEngine(): + return AppEngineFlex + case d.onGCE(): + return GCE + } + return UnknownPlatform +} + +// ProjectID returns the ID of the project in which this program is running. +func (d *Detector) ProjectID() (string, error) { + return d.metadata.ProjectID() +} + +// Detector collects resource information for all GCP platforms. +type Detector struct { + metadata metadataProvider + os osProvider +} + +// metadataProvider contains the subset of the metadata.Client functions used +// by this resource Detector to allow testing with a fake implementation. +type metadataProvider interface { + ProjectID() (string, error) + InstanceID() (string, error) + Get(string) (string, error) + InstanceName() (string, error) + Hostname() (string, error) + Zone() (string, error) + InstanceAttributeValue(string) (string, error) +} + +// osProvider contains the subset of the os package functions used by. +type osProvider interface { + LookupEnv(string) (string, bool) +} + +// realOSProvider uses the os package to lookup env vars. +type realOSProvider struct{} + +func (realOSProvider) LookupEnv(env string) (string, bool) { + return os.LookupEnv(env) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go new file mode 100644 index 000000000..9277608dd --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go @@ -0,0 +1,105 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "strings" +) + +const ( + // Cloud Functions env vars: + // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes + // + // Cloud Run env vars: + // https://cloud.google.com/run/docs/container-contract#services-env-vars + // + // Cloud Run jobs env vars: + // https://cloud.google.com/run/docs/container-contract#jobs-env-vars + cloudFunctionsTargetEnv = "FUNCTION_TARGET" + cloudRunConfigurationEnv = "K_CONFIGURATION" + cloudRunJobsEnv = "CLOUD_RUN_JOB" + faasServiceEnv = "K_SERVICE" + faasRevisionEnv = "K_REVISION" + cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION" + cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX" + regionMetadataAttr = "instance/region" +) + +func (d *Detector) onCloudFunctions() bool { + _, found := d.os.LookupEnv(cloudFunctionsTargetEnv) + return found +} + +func (d *Detector) onCloudRun() bool { + _, found := d.os.LookupEnv(cloudRunConfigurationEnv) + return found +} + +func (d *Detector) onCloudRunJob() bool { + _, found := d.os.LookupEnv(cloudRunJobsEnv) + return found +} + +// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service. +func (d *Detector) FaaSName() (string, error) { + if name, found := d.os.LookupEnv(faasServiceEnv); found { + return name, nil + } + if name, found := d.os.LookupEnv(cloudRunJobsEnv); found { + return name, nil + } + return "", errEnvVarNotFound +} + +// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service. +func (d *Detector) FaaSVersion() (string, error) { + if version, found := d.os.LookupEnv(faasRevisionEnv); found { + return version, nil + } + return "", errEnvVarNotFound +} + +// CloudRunJobExecution returns the execution id of the Cloud Run jobs. +func (d *Detector) CloudRunJobExecution() (string, error) { + if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found { + return eid, nil + } + return "", errEnvVarNotFound +} + +// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs. +func (d *Detector) CloudRunJobTaskIndex() (string, error) { + if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found { + return tidx, nil + } + return "", errEnvVarNotFound +} + +// FaaSID returns the instance id of the Cloud Run or Cloud Function. +func (d *Detector) FaaSID() (string, error) { + return d.metadata.InstanceID() +} + +// FaaSCloudRegion detects region from the metadata server. +// It is in the format /projects//regions/. +// +// https://cloud.google.com/run/docs/reference/container-contract#metadata-server +func (d *Detector) FaaSCloudRegion() (string, error) { + region, err := d.metadata.Get(regionMetadataAttr) + if err != nil { + return "", err + } + return region[strings.LastIndex(region, "/")+1:], nil +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go new file mode 100644 index 000000000..37259fc45 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go @@ -0,0 +1,75 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "fmt" + "strings" +) + +// See the available GCE instance metadata: +// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata +const machineTypeMetadataAttr = "instance/machine-type" + +func (d *Detector) onGCE() bool { + _, err := d.metadata.Get(machineTypeMetadataAttr) + return err == nil +} + +// GCEHostType returns the machine type of the instance on which this program is running. +func (d *Detector) GCEHostType() (string, error) { + return d.metadata.Get(machineTypeMetadataAttr) +} + +// GCEHostID returns the instance ID of the instance on which this program is running. +func (d *Detector) GCEHostID() (string, error) { + return d.metadata.InstanceID() +} + +// GCEHostName returns the instance name of the instance on which this program is running. +// Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which +// value is returned. +func (d *Detector) GCEHostName() (string, error) { + return d.metadata.InstanceName() +} + +// GCEInstanceName returns the instance name of the instance on which this program is running. +// This is the value visible in the Cloud Console UI, and the prefix for the default hostname +// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func (d *Detector) GCEInstanceName() (string, error) { + return d.metadata.InstanceName() +} + +// GCEInstanceHostname returns the full value of the default or custom hostname of the instance +// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm. +func (d *Detector) GCEInstanceHostname() (string, error) { + return d.metadata.Hostname() +} + +// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running. +func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) { + zone, err := d.metadata.Zone() + if err != nil { + return "", "", err + } + if zone == "" { + return "", "", fmt.Errorf("no zone detected from GCE metadata server") + } + splitZone := strings.SplitN(zone, "-", 3) + if len(splitZone) != 3 { + return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone) + } + return zone, strings.Join(splitZone[0:2], "-"), nil +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go new file mode 100644 index 000000000..67ed972b2 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go @@ -0,0 +1,70 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "fmt" + "strings" +) + +const ( + // If the kubernetes.default.svc service exists in the cluster, + // then the KUBERNETES_SERVICE_HOST env var will be populated. + // Use this as an indication that we are running on kubernetes. + k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST" + // See the available GKE metadata: + // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata + clusterNameMetadataAttr = "cluster-name" + clusterLocationMetadataAttr = "cluster-location" +) + +func (d *Detector) onGKE() bool { + _, found := d.os.LookupEnv(k8sServiceHostEnv) + return found +} + +// GKEHostID returns the instance ID of the instance on which this program is running. +func (d *Detector) GKEHostID() (string, error) { + return d.GCEHostID() +} + +// GKEClusterName returns the name if the GKE cluster in which this program is running. +func (d *Detector) GKEClusterName() (string, error) { + return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr) +} + +type LocationType int64 + +const ( + UndefinedLocation LocationType = iota + Zone + Region +) + +// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional. +func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) { + clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr) + if err != nil { + return "", UndefinedLocation, err + } + switch strings.Count(clusterLocation, "-") { + case 1: + return clusterLocation, Region, nil + case 2: + return clusterLocation, Zone, nil + default: + return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation) + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md new file mode 100644 index 000000000..c77d5eb15 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md @@ -0,0 +1,37 @@ +# OpenTelemetry Google Cloud Monitoring Exporter + +[![Docs](https://godoc.org/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric?status.svg)](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric) +[![Apache License][license-image]][license-url] + +OpenTelemetry Google Cloud Monitoring Exporter allow the user to send collected metrics to Google Cloud. + +[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more. + +## Setup + +Google Cloud Monitoring is a managed service provided by Google Cloud Platform. Google Cloud Monitoring requires to set up "Workspace" in advance. The guide to create a new Workspace is available on [the official document](https://cloud.google.com/monitoring/workspaces/create). + +## Authentication + +The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so the service account is automatically detected by default, but also the custom credential file (so called `service_account_key.json`) can be detected with specific conditions. Quoting from the document of `google.FindDefaultCredentials`: + +* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. +* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`. + +When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g. 
`GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.: + +```golang +projectID := os.Getenv("GOOGLE_CLOUD_PROJECT") +opts := []mexporter.Option{ + mexporter.WithProjectID(projectID), +} +``` + +## Useful links + +* For more information on OpenTelemetry, visit: https://opentelemetry.io/ +* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go +* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring + +[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE +[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go new file mode 100644 index 000000000..90dfcb344 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go @@ -0,0 +1,49 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "context" + "errors" + "fmt" + + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "golang.org/x/oauth2/google" +) + +// New creates a new Exporter thats implements metric.Exporter. +func New(opts ...Option) (sdkmetric.Exporter, error) { + o := options{ + context: context.Background(), + resourceAttributeFilter: DefaultResourceAttributesFilter, + } + for _, opt := range opts { + opt(&o) + } + + if o.projectID == "" { + creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...) + if err != nil { + return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("google cloud monitoring: no project found with application default credentials") + } + o.projectID = creds.ProjectID + } + return newMetricExporter(&o) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go new file mode 100644 index 000000000..57329a4bd --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go @@ -0,0 +1,97 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +// TODO: remove this file when the constants are ready in the Go SDK + +// Mappings for the well-known OpenTelemetry resource label keys +// to applicable Monitored Resource label keys. +// A uniquely identifying name for the Kubernetes cluster. Kubernetes +// does not have cluster names as an internal concept so this may be +// set to any meaningful value within the environment. For example, +// GKE clusters have a name which can be used for this label. +const ( + // Deprecated: use semconv.CloudProviderKey instead. + CloudKeyProvider = "cloud.provider" + // Deprecated: use semconv.CloudAccountIDKey instead. + CloudKeyAccountID = "cloud.account.id" + // Deprecated: use semconv.CloudRegionKey instead. + CloudKeyRegion = "cloud.region" + // Deprecated: use semconv.CloudAvailabilityZoneKey instead. + CloudKeyZone = "cloud.availability_zone" + + // Deprecated: use semconv.ServiceNamespaceKey instead. + ServiceKeyNamespace = "service.namespace" + // Deprecated: use semconv.ServiceInstanceIDKey instead. + ServiceKeyInstanceID = "service.instance.id" + // Deprecated: use semconv.ServiceNameKey instead. + ServiceKeyName = "service.name" + + // Deprecated: HostType is not needed. + HostType = "host" + // A uniquely identifying name for the host. + // Deprecated: use semconv.HostNameKey instead. + HostKeyName = "host.name" + // A hostname as returned by the 'hostname' command on host machine. + // Deprecated: HostKeyHostName is not needed. + HostKeyHostName = "host.hostname" + // Deprecated: use semconv.HostIDKey instead. + HostKeyID = "host.id" + // Deprecated: use semconv.HostTypeKey instead. + HostKeyType = "host.type" + + // A uniquely identifying name for the Container. + // Deprecated: use semconv.ContainerNameKey instead. + ContainerKeyName = "container.name" + // Deprecated: use semconv.ContainerImageNameKey instead. + ContainerKeyImageName = "container.image.name" + // Deprecated: use semconv.ContainerImageTagKey instead. + ContainerKeyImageTag = "container.image.tag" + + // Cloud Providers + // Deprecated: use semconv.CloudProviderAWS instead. + CloudProviderAWS = "aws" + // Deprecated: use semconv.CloudProviderGCP instead. + CloudProviderGCP = "gcp" + // Deprecated: use semconv.CloudProviderAzure instead. + CloudProviderAZURE = "azure" + + // Deprecated: Use "k8s" instead. This should not be needed. + K8S = "k8s" + // Deprecated: use semconv.K8SClusterNameKey instead. + K8SKeyClusterName = "k8s.cluster.name" + // Deprecated: use semconv.K8SNamespaceNameKey instead. + K8SKeyNamespaceName = "k8s.namespace.name" + // Deprecated: use semconv.K8SPodNameKey instead. + K8SKeyPodName = "k8s.pod.name" + // Deprecated: use semconv.K8SDeploymentNameKey instead. + K8SKeyDeploymentName = "k8s.deployment.name" + + // Monitored Resources types + // Deprecated: Use "k8s_container" instead. + K8SContainer = "k8s_container" + // Deprecated: Use "k8s_node" instead. + K8SNode = "k8s_node" + // Deprecated: Use "k8s_pod" instead. + K8SPod = "k8s_pod" + // Deprecated: Use "k8s_cluster" instead. + K8SCluster = "k8s_cluster" + // Deprecated: Use "gce_instance" instead. + GCEInstance = "gce_instance" + // Deprecated: Use "aws_ec2_instance" instead. + AWSEC2Instance = "aws_ec2_instance" + // Deprecated: Use "generic_task" instead. 
+ GenericTask = "generic_task" +) diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go new file mode 100644 index 000000000..974c0af95 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go @@ -0,0 +1,32 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "errors" + "fmt" +) + +var ( + errBlankProjectID = errors.New("expecting a non-blank ProjectID") +) + +type errUnexpectedAggregationKind struct { + kind string +} + +func (e errUnexpectedAggregationKind) Error() string { + return fmt.Sprintf("the metric kind is unexpected: %v", e.kind) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go new file mode 100644 index 000000000..ba0012e25 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go @@ -0,0 +1,890 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/api/distribution" + "google.golang.org/genproto/googleapis/api/label" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping" +) + +const ( + // The number of timeserieses to send to GCM in a single request. 
This + // is a hard limit in the GCM API, so we never want to exceed 200. + sendBatchSize = 200 + + cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s" + platformMappingMonitoredResourceKey = "gcp.resource_type" +) + +// key is used to judge the uniqueness of the record descriptor. +type key struct { + name string + libraryname string +} + +func keyOf(metrics metricdata.Metrics, library instrumentation.Library) key { + return key{ + name: metrics.Name, + libraryname: library.Name, + } +} + +// metricExporter is the implementation of OpenTelemetry metric exporter for +// Google Cloud Monitoring. +type metricExporter struct { + o *options + shutdown chan struct{} + // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD. + mdCache map[key]*googlemetricpb.MetricDescriptor + client *monitoring.MetricClient + mdLock sync.RWMutex + shutdownOnce sync.Once +} + +// ForceFlush does nothing, the exporter holds no state. +func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() } + +// Shutdown shuts down the client connections. +func (e *metricExporter) Shutdown(ctx context.Context) error { + err := errShutdown + e.shutdownOnce.Do(func() { + close(e.shutdown) + err = errors.Join(ctx.Err(), e.client.Close()) + }) + return err +} + +// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring. +func newMetricExporter(o *options) (*metricExporter, error) { + if strings.TrimSpace(o.projectID) == "" { + return nil, errBlankProjectID + } + + clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...) + ctx := o.context + if ctx == nil { + ctx = context.Background() + } + client, err := monitoring.NewMetricClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + if o.compression == "gzip" { + client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + } + + cache := map[key]*googlemetricpb.MetricDescriptor{} + e := &metricExporter{ + o: o, + mdCache: cache, + client: client, + shutdown: make(chan struct{}), + } + return e, nil +} + +var errShutdown = fmt.Errorf("exporter is shutdown") + +// Export exports OpenTelemetry Metrics to Google Cloud Monitoring. +func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + select { + case <-me.shutdown: + return errShutdown + default: + } + + if me.o.destinationProjectQuota { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")})) + } + return errors.Join( + me.exportMetricDescriptor(ctx, rm), + me.exportTimeSeries(ctx, rm), + ) +} + +// Temporality returns the Temporality to use for an instrument kind. +func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality { + return metric.DefaultTemporalitySelector(ik) +} + +// Aggregation returns the Aggregation to use for an instrument kind. 
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation { + return metric.DefaultAggregationSelector(ik) +} + +// exportMetricDescriptor create MetricDescriptor from the record +// if the descriptor is not registered in Cloud Monitoring yet. +func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error { + // We only send metric descriptors if we're configured *and* we're not sending service timeseries. + if me.o.disableCreateMetricDescriptors { + return nil + } + + me.mdLock.Lock() + defer me.mdLock.Unlock() + mds := make(map[key]*googlemetricpb.MetricDescriptor) + extraLabels := me.extraLabelsFromResource(rm.Resource) + for _, scope := range rm.ScopeMetrics { + for _, metrics := range scope.Metrics { + k := keyOf(metrics, scope.Scope) + + if _, ok := me.mdCache[k]; ok { + continue + } + + if _, localok := mds[k]; !localok { + md := me.recordToMdpb(metrics, extraLabels) + mds[k] = md + } + } + } + + // TODO: This process is synchronous and blocks longer time if records in cps + // have many different descriptors. In the cps.ForEach above, it should spawn + // goroutines to send CreateMetricDescriptorRequest asynchronously in the case + // the descriptor does not exist in global cache (me.mdCache). + // See details in #26. + var errs []error + for kmd, md := range mds { + err := me.createMetricDescriptorIfNeeded(ctx, md) + if err == nil { + me.mdCache[kmd] = md + } + errs = append(errs, err) + } + return errors.Join(errs...) +} + +func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error { + mdReq := &monitoringpb.GetMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type), + } + _, err := me.client.GetMetricDescriptor(ctx, mdReq) + if err == nil { + // If the metric descriptor already exists, skip the CreateMetricDescriptor call. + // Metric descriptors cannot be updated without deleting them first, so there + // isn't anything we can do here: + // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify + return nil + } + req := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", me.o.projectID), + MetricDescriptor: md, + } + _, err = me.client.CreateMetricDescriptor(ctx, req) + return err +} + +// exportTimeSeries create TimeSeries from the records in cps. +// res should be the common resource among all TimeSeries, such as instance id, application name and so on. +func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error { + tss, err := me.recordsToTspbs(rm) + if len(tss) == 0 { + return err + } + + name := fmt.Sprintf("projects/%s", me.o.projectID) + + errs := []error{err} + for i := 0; i < len(tss); i += sendBatchSize { + j := i + sendBatchSize + if j >= len(tss) { + j = len(tss) + } + + // TODO: When this exporter is rewritten, support writing to multiple + // projects based on the "gcp.project.id" resource. + req := &monitoringpb.CreateTimeSeriesRequest{ + Name: name, + TimeSeries: tss[i:j], + } + if me.o.createServiceTimeSeries { + errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req)) + } else { + errs = append(errs, me.client.CreateTimeSeries(ctx, req)) + } + } + + return errors.Join(errs...) 
+} + +func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set { + set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter) + return &set +} + +// descToMetricType converts descriptor to MetricType proto type. +// Basically this returns default value ("workload.googleapis.com/[metric type]"). +func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string { + if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil { + return formatter(desc) + } + return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name) +} + +// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name. +func metricTypeToDisplayName(mURL string) string { + // strip domain, keep path after domain. + u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL)) + if err != nil || u.Path == "" { + return mURL + } + return strings.TrimLeft(u.Path, "/") +} + +// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor. +func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor { + name := metrics.Name + typ := me.descToMetricType(metrics) + kind, valueType := recordToMdpbKindType(metrics.Data) + + // Detailed explanations on MetricDescriptor proto is not documented on + // generated Go packages. Refer to the original proto file. + // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33 + return &googlemetricpb.MetricDescriptor{ + Name: name, + DisplayName: metricTypeToDisplayName(typ), + Type: typ, + MetricKind: kind, + ValueType: valueType, + Unit: string(metrics.Unit), + Description: metrics.Description, + Labels: labelDescriptors(metrics, extraLabels), + } +} + +func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor { + labels := []*label.LabelDescriptor{} + seenKeys := map[string]struct{}{} + addAttributes := func(attr *attribute.Set) { + iter := attr.Iter() + for iter.Next() { + kv := iter.Attribute() + // Skip keys that have already been set + if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok { + continue + } + labels = append(labels, &label.LabelDescriptor{ + Key: normalizeLabelKey(string(kv.Key)), + }) + seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{} + } + } + addAttributes(extraLabels) + switch a := metrics.Data.(type) { + case metricdata.Gauge[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Gauge[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Sum[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Sum[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Histogram[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Histogram[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + } + return labels +} + +type attributes struct { + attrs attribute.Set +} + +func (attrs *attributes) GetString(key string) (string, bool) { + value, ok := attrs.attrs.Value(attribute.Key(key)) + return value.AsString(), ok +} + +// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource +// proto type for Cloud Monitoring. 
+// +// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors +func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource { + platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey) + + // check if platform mapping is requested and possible + if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType { + // assemble attributes required to construct this MR + attributeMap := make(map[string]string) + for expectedLabel := range me.o.monitoredResourceDescription.mrLabels { + value, found := res.Set().Value(attribute.Key(expectedLabel)) + if found { + attributeMap[expectedLabel] = value.AsString() + } + } + return &monitoredrespb.MonitoredResource{ + Type: platformMrType.AsString(), + Labels: attributeMap, + } + } + + gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{ + attrs: attribute.NewSet(res.Attributes()...), + }) + newLabels := make(map[string]string, len(gmr.Labels)) + for k, v := range gmr.Labels { + newLabels[k] = sanitizeUTF8(v) + } + mr := &monitoredrespb.MonitoredResource{ + Type: gmr.Type, + Labels: newLabels, + } + return mr +} + +// recordToMdpbKindType return the mapping from OTel's record descriptor to +// Cloud Monitoring's MetricKind and ValueType. +func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { + switch agg := a.(type) { + case metricdata.Gauge[int64]: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + case metricdata.Gauge[float64]: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + case metricdata.Sum[int64]: + if agg.IsMonotonic { + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 + } + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + case metricdata.Sum[float64]: + if agg.IsMonotonic { + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE + } + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + case metricdata.Histogram[int64], metricdata.Histogram[float64]: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION + default: + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } +} + +// recordToMpb converts data from records to Metric proto type for Cloud Monitoring. +func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Library, extraLabels *attribute.Set) *googlemetricpb.Metric { + me.mdLock.RLock() + defer me.mdLock.RUnlock() + k := keyOf(metrics, library) + md, ok := me.mdCache[k] + if !ok { + md = me.recordToMdpb(metrics, extraLabels) + } + + labels := make(map[string]string) + addAttributes := func(attr *attribute.Set) { + iter := attr.Iter() + for iter.Next() { + kv := iter.Attribute() + labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit()) + } + } + addAttributes(extraLabels) + addAttributes(&attributes) + + return &googlemetricpb.Metric{ + Type: md.Type, + Labels: labels, + } +} + +// recordToTspb converts record to TimeSeries proto type with common resource. +// ref. 
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries +func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) { + var tss []*monitoringpb.TimeSeries + var errs []error + if m.Data == nil { + return nil, nil + } + switch a := m.Data.(type) { + case metricdata.Gauge[int64]: + for _, point := range a.DataPoints { + ts, err := gaugeToTimeSeries[int64](point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Gauge[float64]: + for _, point := range a.DataPoints { + ts, err := gaugeToTimeSeries[float64](point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Sum[int64]: + for _, point := range a.DataPoints { + var ts *monitoringpb.TimeSeries + var err error + if a.IsMonotonic { + ts, err = sumToTimeSeries[int64](point, m, mr) + } else { + // Send non-monotonic sums as gauges + ts, err = gaugeToTimeSeries[int64](point, m, mr) + } + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Sum[float64]: + for _, point := range a.DataPoints { + var ts *monitoringpb.TimeSeries + var err error + if a.IsMonotonic { + ts, err = sumToTimeSeries[float64](point, m, mr) + } else { + // Send non-monotonic sums as gauges + ts, err = gaugeToTimeSeries[float64](point, m, mr) + } + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Histogram[int64]: + for _, point := range a.DataPoints { + ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Histogram[float64]: + for _, point := range a.DataPoints { + ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.ExponentialHistogram[int64]: + for _, point := range a.DataPoints { + ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.ExponentialHistogram[float64]: + for _, point := range a.DataPoints { + ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + default: + errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()}) + } + return tss, errors.Join(errs...) 
+} + +func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) { + mr := me.resourceToMonitoredResourcepb(rm.Resource) + extraLabels := me.extraLabelsFromResource(rm.Resource) + + var ( + tss []*monitoringpb.TimeSeries + errs []error + ) + for _, scope := range rm.ScopeMetrics { + for _, metrics := range scope.Metrics { + ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels) + errs = append(errs, err) + tss = append(tss, ts...) + } + } + + return tss, errors.Join(errs...) +} + +func sanitizeUTF8(s string) string { + return strings.ToValidUTF8(s, "�") +} + +func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + value, valueType := numberDataPointToValue(point) + timestamp := timestamppb.New(point.Time) + if err := timestamp.CheckValid(); err != nil { + return nil, err + } + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_GAUGE, + ValueType: valueType, + Points: []*monitoringpb.Point{{ + Interval: &monitoringpb.TimeInterval{ + EndTime: timestamp, + }, + Value: value, + }}, + }, nil +} + +func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + value, valueType := numberDataPointToValue[N](point) + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: valueType, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: value, + }}, + }, nil +} + +// TODO(@dashpole): Refactor to pass control-coupling lint check. +// +//nolint:revive +func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + distributionValue := histToDistribution(point, projectID) + if enableSOSD { + setSumOfSquaredDeviation(point, distributionValue) + } + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: distributionValue, + }, + }, + }}, + }, nil +} + +func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + distributionValue := expHistToDistribution(point, projectID) + // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality. 
+ return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: distributionValue, + }, + }, + }}, + }, nil +} + +func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) { + // The end time of a new interval must be at least a millisecond after the end time of the + // previous interval, for all non-gauge types. + // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval + if end.Sub(start).Milliseconds() <= 1 { + end = start.Add(time.Millisecond) + } + startpb := timestamppb.New(start) + endpb := timestamppb.New(end) + err := errors.Join( + startpb.CheckValid(), + endpb.CheckValid(), + ) + if err != nil { + return nil, err + } + + return &monitoringpb.TimeInterval{ + StartTime: startpb, + EndTime: endpb, + }, nil +} + +func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution { + counts := make([]int64, len(hist.BucketCounts)) + for i, v := range hist.BucketCounts { + counts[i] = int64(v) + } + var mean float64 + if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero + mean = float64(hist.Sum) / float64(hist.Count) + } + return &distribution.Distribution{ + Count: int64(hist.Count), + Mean: mean, + BucketCounts: counts, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: hist.Bounds, + }, + }, + }, + Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID), + } +} + +func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution { + // First calculate underflow bucket with all negatives + zeros. + underflow := hist.ZeroCount + negativeBuckets := hist.NegativeBucket.Counts + for i := 0; i < len(negativeBuckets); i++ { + underflow += negativeBuckets[i] + } + + // Next, pull in remaining buckets. + counts := make([]int64, len(hist.PositiveBucket.Counts)+2) + bucketOptions := &distribution.Distribution_BucketOptions{} + counts[0] = int64(underflow) + positiveBuckets := hist.PositiveBucket.Counts + for i := 0; i < len(positiveBuckets); i++ { + counts[i+1] = int64(positiveBuckets[i]) + } + // Overflow bucket is always empty + counts[len(counts)-1] = 0 + + if len(hist.PositiveBucket.Counts) == 0 { + // We cannot send exponential distributions with no positive buckets, + // instead we send a simple overflow/underflow histogram. 
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: []float64{0}, + }, + } + } else { + // Exponential histogram + growth := math.Exp2(math.Exp2(-float64(hist.Scale))) + scale := math.Pow(growth, float64(hist.PositiveBucket.Offset)) + bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{ + ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{ + GrowthFactor: growth, + Scale: scale, + NumFiniteBuckets: int32(len(counts) - 2), + }, + } + } + + var mean float64 + if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero + mean = float64(hist.Sum) / float64(hist.Count) + } + + return &distribution.Distribution{ + Count: int64(hist.Count), + Mean: mean, + BucketCounts: counts, + BucketOptions: bucketOptions, + Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID), + } +} + +func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar { + var exemplars []*distribution.Distribution_Exemplar + for _, e := range Exemplars { + attachments := []*anypb.Any{} + if hasValidSpanContext(e) { + sctx, err := anypb.New(&monitoringpb.SpanContext{ + SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])), + }) + if err == nil { + attachments = append(attachments, sctx) + } + } + if len(e.FilteredAttributes) > 0 { + attr, err := anypb.New(&monitoringpb.DroppedLabels{ + Label: attributesToLabels(e.FilteredAttributes), + }) + if err == nil { + attachments = append(attachments, attr) + } + } + exemplars = append(exemplars, &distribution.Distribution_Exemplar{ + Value: float64(e.Value), + Timestamp: timestamppb.New(e.Time), + Attachments: attachments, + }) + } + sort.Slice(exemplars, func(i, j int) bool { + return exemplars[i].Value < exemplars[j].Value + }) + return exemplars +} + +func attributesToLabels(attrs []attribute.KeyValue) map[string]string { + labels := make(map[string]string, len(attrs)) + for _, attr := range attrs { + labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit()) + } + return labels +} + +var ( + nilTraceID trace.TraceID + nilSpanID trace.SpanID +) + +func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool { + return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:]) +} + +func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) { + var prevBound float64 + // Calculate the sum of squared deviation. + for i := 0; i < len(hist.Bounds); i++ { + // Assume all points in the bucket occur at the middle of the bucket range + middleOfBucket := (prevBound + hist.Bounds[i]) / 2 + dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean) + prevBound = hist.Bounds[i] + } + // The infinity bucket is an implicit +Inf bound after the list of explicit bounds. 
+ // Assume points in the infinity bucket are at the top of the previous bucket + middleOfInfBucket := prevBound + if len(dist.BucketCounts) > 0 { + dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean) + } +} + +func numberDataPointToValue[N int64 | float64]( + point metricdata.DataPoint[N], +) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) { + switch v := any(point.Value).(type) { + case int64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v, + }}, + googlemetricpb.MetricDescriptor_INT64 + case float64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v, + }}, + googlemetricpb.MetricDescriptor_DOUBLE + } + // It is impossible to reach this statement + return nil, googlemetricpb.MetricDescriptor_INT64 +} + +// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149 +// +// > The label key name must follow: +// > +// > * Only upper and lower-case letters, digits and underscores (_) are +// > allowed. +// > * Label name must start with a letter or digit. +// > * The maximum length of a label name is 100 characters. +// +// Note: this does not truncate if a label is too long. +func normalizeLabelKey(s string) string { + if len(s) == 0 { + return s + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore. +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go new file mode 100644 index 000000000..11b96067d --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go @@ -0,0 +1,201 @@ +// Copyright 2020-2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + + apioption "google.golang.org/api/option" +) + +var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version()) + +// MonitoredResourceDescription is the struct which holds information required to map OTel resource to specific +// Google Cloud MonitoredResource. +type MonitoredResourceDescription struct { + mrLabels map[string]struct{} + mrType string +} + +// Option is function type that is passed to the exporter initialization function. 
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+	// context allows you to provide a custom context for API calls.
+	//
+	// This context will be used several times: first, to create Cloud Monitoring
+	// clients, and then every time a new batch of metrics needs to be uploaded.
+	//
+	// If unset, context.Background() will be used.
+	context context.Context
+	// metricDescriptorTypeFormatter is the custom formatter for the MetricDescriptor.Type.
+	// By default, the format string is "workload.googleapis.com/[metric name]".
+	metricDescriptorTypeFormatter func(metricdata.Metrics) string
+	// resourceAttributeFilter determines which resource attributes to
+	// add to metrics as metric labels. By default, it adds service.name,
+	// service.namespace, and service.instance.id.
+	resourceAttributeFilter attribute.Filter
+	// monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+	// Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+	// monitored resource type.
+	monitoredResourceDescription MonitoredResourceDescription
+	// projectID is the identifier of the Cloud Monitoring
+	// project the user is uploading the stats data to.
+	// If not set, this will default to your "Application Default Credentials".
+	// For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+	//
+	// It will be used in the project_id label of a Google Cloud Monitoring monitored
+	// resource if the resource does not inherently belong to a specific
+	// project, e.g. on-premise resource like k8s_container or generic_task.
+	projectID string
+	// compression enables gzip compression on gRPC calls.
+	compression string
+	// monitoringClientOptions are additional options to be passed
+	// to the underlying Stackdriver Monitoring API client.
+	// Optional.
+	monitoringClientOptions []apioption.ClientOption
+	// destinationProjectQuota sets whether the request should use quota from
+	// the destination project for the request.
+	destinationProjectQuota bool
+
+	// disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+	disableCreateMetricDescriptors bool
+
+	// enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+	// deviation. It isn't correct, so we don't send it by default.
+	enableSumOfSquaredDeviation bool
+
+	// createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+	// Implicitly, this sets `disableCreateMetricDescriptors` to true.
+	createServiceTimeSeries bool
+}
+
+// WithProjectID sets the Google Cloud Platform project as projectID.
+// Without this option, the project ID is detected automatically
+// through the default credential detection process.
+// The detailed order of the default credential detection process is documented at:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+	return func(o *options) {
+		o.projectID = id
+	}
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+	return func(o *options) {
+		o.destinationProjectQuota = true
+	}
+}
+
+// WithMonitoringClientOptions adds options for the Cloud Monitoring client instance.
+// Available options are defined in the google.golang.org/api/option package.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+	return func(o *options) {
+		o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+	}
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+	return func(o *options) {
+		o.metricDescriptorTypeFormatter = f
+	}
+}
+
+// WithFilteredResourceAttributes determines which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+	return func(o *options) {
+		o.resourceAttributeFilter = filter
+	}
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+	return (kv.Key == semconv.ServiceNameKey ||
+		kv.Key == semconv.ServiceNamespaceKey ||
+		kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+	return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+	return func(o *options) {
+		o.disableCreateMetricDescriptors = true
+	}
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+	return func(o *options) {
+		o.compression = c
+	}
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+	return func(o *options) {
+		o.enableSumOfSquaredDeviation = true
+	}
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+	return func(o *options) {
+		o.createServiceTimeSeries = true
+		o.disableCreateMetricDescriptors = true
+	}
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+// Google MonitoredResource. The provided mrLabels would be searched for in the OpenTelemetry Resource Attributes and if
+// found, would be included in the MonitoredResource labels.
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) { + return func(o *options) { + mrLabelSet := make(map[string]struct{}) + for _, label := range mrLabels { + mrLabelSet[label] = struct{}{} + } + o.monitoredResourceDescription = MonitoredResourceDescription{ + mrType: mrType, + mrLabels: mrLabelSet, + } + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go new file mode 100644 index 000000000..e31119fc1 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go @@ -0,0 +1,21 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +// Version is the current release version of the OpenTelemetry +// Operations Metric Exporter in use. +func Version() string { + return "0.48.1" +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go new file mode 100644 index 000000000..4b5af517f --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go @@ -0,0 +1,286 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resourcemapping + +import ( + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +const ( + ProjectIDAttributeKey = "gcp.project.id" + + awsAccount = "aws_account" + awsEc2Instance = "aws_ec2_instance" + clusterName = "cluster_name" + containerName = "container_name" + gceInstance = "gce_instance" + genericNode = "generic_node" + genericTask = "generic_task" + instanceID = "instance_id" + job = "job" + k8sCluster = "k8s_cluster" + k8sContainer = "k8s_container" + k8sNode = "k8s_node" + k8sPod = "k8s_pod" + location = "location" + namespace = "namespace" + namespaceName = "namespace_name" + nodeID = "node_id" + nodeName = "node_name" + podName = "pod_name" + region = "region" + taskID = "task_id" + zone = "zone" + gaeInstance = "gae_instance" + gaeApp = "gae_app" + gaeModuleID = "module_id" + gaeVersionID = "version_id" + cloudRunRevision = "cloud_run_revision" + cloudFunction = "cloud_function" + cloudFunctionName = "function_name" + serviceName = "service_name" + configurationName = "configuration_name" + revisionName = "revision_name" + bmsInstance = "baremetalsolution.googleapis.com/Instance" + unknownServicePrefix = "unknown_service" +) + +var ( + // monitoredResourceMappings contains mappings of GCM resource label keys onto mapping config from OTel + // resource for a given monitored resource type. + monitoredResourceMappings = map[string]map[string]struct { + // If none of the otelKeys are present in the Resource, fallback to this literal value + fallbackLiteral string + // OTel resource keys to try and populate the resource label from. For entries with + // multiple OTel resource keys, the keys' values will be coalesced in order until there + // is a non-empty value. 
+ otelKeys []string + }{ + gceInstance: { + zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}}, + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + }, + k8sContainer: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}}, + podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}}, + containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}}, + }, + k8sPod: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}}, + podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}}, + }, + k8sNode: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}}, + }, + k8sCluster: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + }, + gaeInstance: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}}, + gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}}, + instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}}, + }, + gaeApp: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}}, + gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}}, + }, + awsEc2Instance: { + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + region: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + }, + awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}}, + }, + bmsInstance: { + location: {otelKeys: []string{string(semconv.CloudRegionKey)}}, + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + }, + genericTask: { + location: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + fallbackLiteral: "global", + }, + namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}}, + job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}}, + taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}}, + }, + genericNode: { + location: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + fallbackLiteral: "global", + }, + namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}}, + nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}}, + }, + } +) + +// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK. 
+type ReadOnlyAttributes interface { + GetString(string) (string, bool) +} + +// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a +// GCP monitored resource type and label set for Cloud Logging. +// E.g. +// This may output `gce_instance` type with appropriate labels. +func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey)) + switch cloudPlatform { + case semconv.CloudPlatformGCPAppEngine.Value.AsString(): + return createMonitoredResource(gaeApp, attrs) + default: + return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs) + } +} + +// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a +// GCP monitored resource type and label set for Cloud Monitoring +// E.g. +// This may output `gce_instance` type with appropriate labels. +func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey)) + switch cloudPlatform { + case semconv.CloudPlatformGCPAppEngine.Value.AsString(): + return createMonitoredResource(gaeInstance, attrs) + default: + return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs) + } +} + +func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + switch cloudPlatform { + case semconv.CloudPlatformGCPComputeEngine.Value.AsString(): + return createMonitoredResource(gceInstance, attrs) + case semconv.CloudPlatformAWSEC2.Value.AsString(): + return createMonitoredResource(awsEc2Instance, attrs) + // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution + // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way + // into the semconv module. + case "gcp_bare_metal_solution": + return createMonitoredResource(bmsInstance, attrs) + default: + // if k8s.cluster.name is set, pattern match for various k8s resources. + // this will also match non-cloud k8s platforms like minikube. 
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok { + // Try for most to least specific k8s_container, k8s_pod, etc + if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok { + return createMonitoredResource(k8sContainer, attrs) + } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok { + return createMonitoredResource(k8sPod, attrs) + } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok { + return createMonitoredResource(k8sNode, attrs) + } else { + return createMonitoredResource(k8sCluster, attrs) + } + } + + // Fallback to generic_task + _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey)) + _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey)) + _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey)) + _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey)) + if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) { + return createMonitoredResource(genericTask, attrs) + } + + // Everything else fallback to generic_node + return createMonitoredResource(genericNode, attrs) + } +} + +func createMonitoredResource( + monitoredResourceType string, + resourceAttrs ReadOnlyAttributes, +) *monitoredrespb.MonitoredResource { + mappings := monitoredResourceMappings[monitoredResourceType] + mrLabels := make(map[string]string, len(mappings)) + + for mrKey, mappingConfig := range mappings { + mrValue := "" + ok := false + // Coalesce the possible keys in order + for _, otelKey := range mappingConfig.otelKeys { + mrValue, ok = resourceAttrs.GetString(otelKey) + if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) { + break + } + } + if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) { + // the service name started with unknown_service, and was ignored above + mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey)) + } + if !ok || mrValue == "" { + mrValue = mappingConfig.fallbackLiteral + } + mrLabels[mrKey] = sanitizeUTF8(mrValue) + } + return &monitoredrespb.MonitoredResource{ + Type: monitoredResourceType, + Labels: mrLabels, + } +} + +func contains(list []string, element string) bool { + for _, item := range list { + if item == element { + return true + } + } + return false +} + +func sanitizeUTF8(s string) string { + return strings.ToValidUTF8(s, "�") +} diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index 186c2eb18..40f5f333b 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59 +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:de2a0a20c1c3b39c3de829196de9694d09f97cd18fda1004de855ed2b4c841ba USER root diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go index f629a6a2e..a6fa3d4a2 100644 --- a/vendor/github.com/IBM/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -1101,7 +1101,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, 
ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1134,7 +1134,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go index 268696cf4..d0d5b87b8 100644 --- a/vendor/github.com/IBM/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -59,7 +59,8 @@ type Broker struct { kerberosAuthenticator GSSAPIKerberosAuth clientSessionReauthenticationTimeMs int64 - throttleTimer *time.Timer + throttleTimer *time.Timer + throttleTimerLock sync.Mutex } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -1697,6 +1698,8 @@ func (b *Broker) handleThrottledResponse(resp protocolBody) { } func (b *Broker) setThrottle(throttleTime time.Duration) { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { // if there is an existing timer stop/clear it if !b.throttleTimer.Stop() { @@ -1707,6 +1710,8 @@ func (b *Broker) setThrottle(throttleTime time.Duration) { } func (b *Broker) waitIfThrottled() { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID()) <-b.throttleTimer.C diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go index ad970a3f0..f2f197887 100644 --- a/vendor/github.com/IBM/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -387,7 +387,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. 
MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to @@ -523,7 +523,7 @@ func NewConfig() *Config { c.Metadata.Full = true c.Metadata.AllowAutoTopicCreation = true - c.Producer.MaxMessageBytes = 1000000 + c.Producer.MaxMessageBytes = 1024 * 1024 c.Producer.RequiredAcks = WaitForLocal c.Producer.Timeout = 10 * time.Second c.Producer.Partitioner = NewHashPartitioner diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index e916416d5..204768e32 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,8 +1,8 @@ -version: '3.9' services: zookeeper-1: hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '1' @@ -15,6 +15,7 @@ services: zookeeper-2: hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '2' @@ -27,6 +28,7 @@ services: zookeeper-3: hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '3' @@ -39,6 +41,7 @@ services: kafka-1: hostname: 'kafka-1' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -84,6 +87,7 @@ services: kafka-2: hostname: 'kafka-2' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -129,6 +133,7 @@ services: kafka-3: hostname: 'kafka-3' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -174,6 +179,7 @@ services: kafka-4: hostname: 'kafka-4' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -219,6 +225,7 @@ services: kafka-5: hostname: 'kafka-5' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -264,6 +271,7 @@ services: toxiproxy: hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' + init: true healthcheck: test: ['CMD', '/toxiproxy-cli', 'l'] interval: 15s diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go index 1bf545908..294865127 100644 --- a/vendor/github.com/IBM/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -251,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. 
+ broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go index ca7e13dab..bf20b75e9 100644 --- a/vendor/github.com/IBM/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go index 7d960abaf..de0acc7f8 100644 --- a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go +++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "os" + "strconv" "time" "github.com/alibabacloud-go/tea/tea" @@ -154,6 +155,9 @@ func (r *OIDCCredential) updateCredential() (err error) { if r.Policy != "" { request.QueryParams["Policy"] = r.Policy } + if r.RoleSessionExpiration > 0 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration) + } request.QueryParams["RoleSessionName"] = r.RoleSessionName request.QueryParams["Version"] = "2015-04-01" request.QueryParams["SignatureNonce"] = utils.GetUUID() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md index 7d6534ede..ef0edc727 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -1,3 +1,58 @@ +# v1.37.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.4 (2024-10-03) + +* No change notes available for this release. + +# v1.36.3 (2024-09-27) + +* No change notes available for this release. + +# v1.36.2 (2024-09-25) + +* No change notes available for this release. 
+ +# v1.36.1 (2024-09-23) + +* No change notes available for this release. + +# v1.36.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.35.7 (2024-09-04) + +* No change notes available for this release. + +# v1.35.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.5 (2024-08-22) + +* No change notes available for this release. + +# v1.35.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.35.3 (2024-07-10.2) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go index d05920642..64c4d0f80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -4,6 +4,7 @@ package kms import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -19,7 +20,9 @@ import ( smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" @@ -30,6 +33,133 @@ import ( const ServiceID = "KMS" const ServiceAPIVersion = "2014-11-01" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
+ + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/kms") +} + // Client provides the API client to make operations call for AWS Key Management // Service. 
type Client struct { @@ -57,6 +187,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -89,8 +223,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -114,15 +255,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -160,7 +342,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -238,16 +420,15 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig 
returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AccountIDEndpointMode: cfg.AccountIDEndpointMode, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -435,6 +616,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -478,6 +683,7 @@ func addIsPaginatorUserAgent(o *Options) { func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") }) if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { return err @@ -541,25 +747,6 @@ func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } -func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { - switch mode { - case aws.AccountIDEndpointModeUnset: - case aws.AccountIDEndpointModePreferred: - case aws.AccountIDEndpointModeDisabled: - case aws.AccountIDEndpointModeRequired: - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { - return fmt.Errorf("accountID is required but not set") - } else if ca.Credentials.AccountID == "" { - return fmt.Errorf("accountID is required but not set") - } - // default check in case invalid mode is configured through request config - default: - return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) - } - - return nil -} - func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -575,6 +762,18 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { 
+ if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -626,3 +825,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go index 71c9a0c01..706c00b30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -125,6 +125,9 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -164,6 +167,18 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + 
return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go index 967eb34fd..8656b61c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -179,6 +179,9 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -218,6 +221,18 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go index cca6fb5c9..84e8a58d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -178,6 +178,9 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -217,6 +220,18 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go index b3e5109cc..548da759f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -327,6 +327,9 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -366,6 +369,18 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + 
return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go index 95754b366..d30a5e80e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -285,6 +285,9 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -324,6 +327,18 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go index 25aa94a07..259c5fce6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -533,6 +533,9 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -572,6 +575,18 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go index 6558f5bb8..93d64f580 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -296,6 +296,9 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -335,6 +338,18 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return 
nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go index 4dab0d253..58af22e36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +170,18 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go index bab682962..b773858b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -145,6 +145,9 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -184,6 +187,18 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go index 480dfbcca..c952f6305 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -127,6 +127,9 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -166,6 +169,18 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go index 1ec4daeaf..fef5dc426 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go @@ -298,6 +298,9 @@ func (c *Client) addOperationDeriveSharedSecretMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -337,6 +340,18 @@ func (c *Client) addOperationDeriveSharedSecretMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go index 107f19bc7..438537acd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -178,6 +178,9 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -214,6 +217,18 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go index 67b026c3b..3ac7fafa0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -191,6 +191,9 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -230,6 +233,18 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go index 20bfe6cb9..3d247346d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -119,6 +119,9 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +161,18 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go index 0a12984ac..30ee5155f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -188,6 +191,18 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go index 0b563fcbc..55327cb72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -173,6 +176,18 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go 
index a792631d2..fc8402f5e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -116,6 +116,9 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -155,6 +158,18 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go index 2b6bae01b..3236e4970 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -188,6 +188,9 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -227,6 +230,18 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go index 9430f62f8..ac6031bdf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -256,6 +256,9 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -295,6 +298,18 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go index d030a6943..5cab73d3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -311,6 +311,9 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -350,6 +353,18 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go index 8934cc5d8..394bac814 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -309,6 +309,9 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -348,6 +351,18 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go index bed654be6..5bbc48fda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -236,6 +236,9 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -275,6 +278,18 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go index bf1304753..69b955021 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -236,6 +236,9 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -275,6 +278,18 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go index cec517c2b..a5505e333 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -181,6 +181,9 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -220,6 +223,18 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go index 29aba8fa1..3ff869365 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -169,6 +169,9 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -205,6 +208,18 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go index ee38bff33..b0368d534 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -123,6 +123,9 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -162,6 +165,18 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go index 18d0e207e..0484bb59c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -192,6 +192,9 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -231,6 +234,18 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go index f1af81f15..e3ffaca7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -240,6 +240,9 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -279,6 +282,18 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go index a32205fe3..245cf4610 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -231,6 +231,9 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -270,6 +273,18 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go index 60395169b..ee7037a49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -255,6 +255,9 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -294,6 +297,18 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go index 67bc9d7aa..e7f68db3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -166,6 +166,9 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -202,6 +205,18 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go index 2ee6363dd..bd9433fa6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -172,6 +172,9 @@ func (c *Client) 
addOperationListGrantsMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -211,6 +214,18 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go index 34522dd1a..c68867a53 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -146,6 +146,9 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -185,6 +188,18 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go index bc3524b32..eed276b15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go @@ -154,6 +154,9 @@ func (c *Client) addOperationListKeyRotationsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -193,6 +196,18 @@ func (c *Client) addOperationListKeyRotationsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go index 80ed5ba24..9c3860fcc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -131,6 +131,9 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option if err = addRecordResponseTiming(stack); err != nil { 
return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +170,18 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go index a18dd7f01..7bdf6304c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -162,6 +162,9 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -201,6 +204,18 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go index 2bc033112..7fe68d8e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -167,6 +167,9 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -206,6 +209,18 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go index 53c376690..4957cdee9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -178,6 +178,9 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if 
err = addClientUserAgent(stack, options); err != nil { return err } @@ -217,6 +220,18 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go index f65a577fa..7d6f278de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go @@ -335,6 +335,9 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -374,6 +377,18 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go index 84fa3d11e..4c5bb7b17 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -332,6 +332,9 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -371,6 +374,18 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go index 9158b1c1c..016739eab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -150,6 +150,9 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -186,6 +189,18 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack 
*middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go index ed45c1596..c97a332bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -149,6 +149,9 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -188,6 +191,18 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go index ed5d27a06..5424efcf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go @@ -171,6 +171,9 @@ func (c *Client) addOperationRotateKeyOnDemandMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -210,6 +213,18 @@ func (c *Client) addOperationRotateKeyOnDemandMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go index 6000bc763..090468d5e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -202,6 +202,9 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -241,6 +244,18 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { 
return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go index b3b7d732f..553404e23 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -259,6 +259,9 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -298,6 +301,18 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go index 6d964f39b..6ec3ee335 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -160,6 +160,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -199,6 +202,18 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go index d62a3ae0a..ee01e6e7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -143,6 +143,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +185,18 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go index fc9ad5857..2238087b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -174,6 +174,9 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -213,6 +216,18 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go index a615be4b0..fa7c84dfd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -284,6 +284,9 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -323,6 +326,18 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go index 82e9e6e5c..83ed78459 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -127,6 +127,9 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -166,6 +169,18 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go index 3792c12db..e79f816ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -182,6 +182,9 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -221,6 +224,18 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go index 8f3676ae1..4a25ff29f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -247,6 +247,9 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -286,6 +289,18 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go index 7be101c10..484906a5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -180,6 +180,9 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -219,6 +222,18 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
index fea1da697..f9d4ef035 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
@@ -8,7 +8,9 @@ import (
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
smithy "github.com/aws/smithy-go"
smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -145,6 +147,9 @@ func (*resolveAuthSchemeMiddleware) ID() string {
func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
if err != nil {
@@ -157,6 +162,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid
}
ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
return next.HandleFinalize(ctx, in)
}
@@ -216,7 +224,10 @@ func (*getIdentityMiddleware) ID() string {
func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
- rscheme := getResolvedAuthScheme(ctx)
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
if rscheme == nil {
return out, metadata, fmt.Errorf("no resolved auth scheme")
}
@@ -226,12 +237,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar
return out, metadata, fmt.Errorf("no identity resolver")
}
- identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties)
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
if err != nil {
return out, metadata, fmt.Errorf("get identity: %w", err)
}
ctx = setIdentity(ctx, identity)
+
+ span.End()
return next.HandleFinalize(ctx, in)
}
@@ -247,6 +266,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity {
}
type signRequestMiddleware struct {
+ options Options
}
func (*signRequestMiddleware) ID() string {
@@ -256,6 +276,9 @@ func (*signRequestMiddleware) ID() string {
func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
@@ -276,9 +299,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar
return out, metadata, fmt.Errorf("no signer")
}
- if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil {
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error)
{ + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go index f5e1a43d4..acd0c603d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go @@ -15,6 +15,7 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" @@ -45,6 +46,10 @@ func (m *awsAwsjson11_deserializeOpCancelKeyDeletion) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -164,6 +169,10 @@ func (m *awsAwsjson11_deserializeOpConnectCustomKeyStore) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -283,6 +292,10 @@ func (m *awsAwsjson11_deserializeOpCreateAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -386,6 +399,10 @@ func (m *awsAwsjson11_deserializeOpCreateCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -541,6 +558,10 @@ func (m *awsAwsjson11_deserializeOpCreateGrant) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -672,6 +693,10 @@ func (m *awsAwsjson11_deserializeOpCreateKey) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -815,6 +840,10 @@ func (m *awsAwsjson11_deserializeOpDecrypt) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -952,6 +981,10 @@ func (m *awsAwsjson11_deserializeOpDeleteAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1046,6 +1079,10 @@ func (m *awsAwsjson11_deserializeOpDeleteCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1162,6 +1199,10 @@ func (m *awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) HandleDeserialize( return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1262,6 +1303,10 @@ func (m *awsAwsjson11_deserializeOpDeriveSharedSecret) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1393,6 +1438,10 @@ func (m *awsAwsjson11_deserializeOpDescribeCustomKeyStores) HandleDeserialize(ct return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1506,6 +1555,10 @@ func (m *awsAwsjson11_deserializeOpDescribeKey) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1622,6 +1675,10 @@ func (m *awsAwsjson11_deserializeOpDisableKey) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1719,6 +1776,10 @@ func (m *awsAwsjson11_deserializeOpDisableKeyRotation) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1822,6 +1883,10 @@ func (m *awsAwsjson11_deserializeOpDisconnectCustomKeyStore) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1935,6 +2000,10 @@ func (m *awsAwsjson11_deserializeOpEnableKey) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2035,6 +2104,10 @@ func (m *awsAwsjson11_deserializeOpEnableKeyRotation) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2138,6 +2211,10 @@ func (m *awsAwsjson11_deserializeOpEncrypt) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2269,6 +2346,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKey) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2400,6 +2481,10 @@ func (m 
*awsAwsjson11_deserializeOpGenerateDataKeyPair) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2534,6 +2619,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) HandleDe return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2668,6 +2757,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2799,6 +2892,10 @@ func (m *awsAwsjson11_deserializeOpGenerateMac) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2927,6 +3024,10 @@ func (m *awsAwsjson11_deserializeOpGenerateRandom) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3046,6 +3147,10 @@ func (m *awsAwsjson11_deserializeOpGetKeyPolicy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3165,6 +3270,10 @@ func (m *awsAwsjson11_deserializeOpGetKeyRotationStatus) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3287,6 +3396,10 @@ func (m *awsAwsjson11_deserializeOpGetParametersForImport) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, 
"OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3409,6 +3522,10 @@ func (m *awsAwsjson11_deserializeOpGetPublicKey) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3543,6 +3660,10 @@ func (m *awsAwsjson11_deserializeOpImportKeyMaterial) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3677,6 +3798,10 @@ func (m *awsAwsjson11_deserializeOpListAliases) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3796,6 +3921,10 @@ func (m *awsAwsjson11_deserializeOpListGrants) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3921,6 +4050,10 @@ func (m *awsAwsjson11_deserializeOpListKeyPolicies) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4040,6 +4173,10 @@ func (m *awsAwsjson11_deserializeOpListKeyRotations) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4162,6 +4299,10 @@ func (m *awsAwsjson11_deserializeOpListKeys) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4275,6 +4416,10 @@ func (m *awsAwsjson11_deserializeOpListResourceTags) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4391,6 +4536,10 @@ func (m *awsAwsjson11_deserializeOpListRetirableGrants) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4510,6 +4659,10 @@ func (m *awsAwsjson11_deserializeOpPutKeyPolicy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4616,6 +4769,10 @@ func (m *awsAwsjson11_deserializeOpReEncrypt) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4753,6 +4910,10 @@ func (m *awsAwsjson11_deserializeOpReplicateKey) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4887,6 +5048,10 @@ func (m *awsAwsjson11_deserializeOpRetireGrant) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4993,6 +5158,10 @@ func (m *awsAwsjson11_deserializeOpRevokeGrant) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} @@ -5096,6 +5265,10 @@ func (m *awsAwsjson11_deserializeOpRotateKeyOnDemand) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5227,6 +5400,10 @@ func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5346,6 +5523,10 @@ func (m *awsAwsjson11_deserializeOpSign) HandleDeserialize(ctx context.Context, return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5477,6 +5658,10 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5577,6 +5762,10 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. 
return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5674,6 +5863,10 @@ func (m *awsAwsjson11_deserializeOpUpdateAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5771,6 +5964,10 @@ func (m *awsAwsjson11_deserializeOpUpdateCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5926,6 +6123,10 @@ func (m *awsAwsjson11_deserializeOpUpdateKeyDescription) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6023,6 +6224,10 @@ func (m *awsAwsjson11_deserializeOpUpdatePrimaryRegion) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6123,6 +6328,10 @@ func (m *awsAwsjson11_deserializeOpVerify) HandleDeserialize(ctx context.Context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6257,6 +6466,10 @@ func (m *awsAwsjson11_deserializeOpVerifyMac) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go index 57bd194b6..0d9f88d68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -483,14 +484,13 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } - if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { - return out, metadata, fmt.Errorf("invalid accountID set: %w", err) - } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -501,11 +501,16 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid } params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -527,5 +532,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go index 66c76d145..b9c3b883f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -3,4 +3,4 @@ package kms // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.35.3" +const goModuleVersion = "1.37.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go index bcd411119..0c6bbd587 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -94,7 +94,7 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), @@ -346,6 +346,24 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.FIPSVariant, + }: { + Hostname: 
"kms-fips.ap-southeast-5.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go index 2402184dd..e7f5513b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -24,9 +26,6 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error - // Indicates how aws account ID is applied in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - // The optional application specific identifier appended to the User-Agent header. AppID string @@ -69,6 +68,9 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string @@ -103,6 +105,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved // value was at that point in time. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go index 0548a52e0..1f9dabb9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -12,6 +12,7 @@ import ( smithyjson "github.com/aws/smithy-go/encoding/json" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "path" ) @@ -26,6 +27,10 @@ func (*awsAwsjson11_serializeOpCancelKeyDeletion) ID() string { func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -68,6 +73,8 @@ func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -81,6 +88,10 @@ func (*awsAwsjson11_serializeOpConnectCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -123,6 +134,8 @@ func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -136,6 +149,10 @@ func (*awsAwsjson11_serializeOpCreateAlias) ID() string { func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -178,6 +195,8 @@ func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -191,6 +210,10 @@ func (*awsAwsjson11_serializeOpCreateCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -233,6 +256,8 @@ func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -246,6 +271,10 @@ func (*awsAwsjson11_serializeOpCreateGrant) ID() string { func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -288,6 +317,8 @@ func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -301,6 +332,10 @@ func (*awsAwsjson11_serializeOpCreateKey) ID() string { func (m 
*awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -343,6 +378,8 @@ func (m *awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -356,6 +393,10 @@ func (*awsAwsjson11_serializeOpDecrypt) ID() string { func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -398,6 +439,8 @@ func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -411,6 +454,10 @@ func (*awsAwsjson11_serializeOpDeleteAlias) ID() string { func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -453,6 +500,8 @@ func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -466,6 +515,10 @@ func (*awsAwsjson11_serializeOpDeleteCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -508,6 +561,8 @@ func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -521,6 +576,10 @@ func (*awsAwsjson11_serializeOpDeleteImportedKeyMaterial) ID() string { func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, 
err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -563,6 +622,8 @@ func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -576,6 +637,10 @@ func (*awsAwsjson11_serializeOpDeriveSharedSecret) ID() string { func (m *awsAwsjson11_serializeOpDeriveSharedSecret) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -618,6 +683,8 @@ func (m *awsAwsjson11_serializeOpDeriveSharedSecret) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -631,6 +698,10 @@ func (*awsAwsjson11_serializeOpDescribeCustomKeyStores) ID() string { func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -673,6 +744,8 @@ func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -686,6 +759,10 @@ func (*awsAwsjson11_serializeOpDescribeKey) ID() string { func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -728,6 +805,8 @@ func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -741,6 +820,10 @@ func (*awsAwsjson11_serializeOpDisableKey) ID() string { func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -783,6 +866,8 @@ func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -796,6 +881,10 @@ func (*awsAwsjson11_serializeOpDisableKeyRotation) ID() string { func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -838,6 +927,8 @@ func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -851,6 +942,10 @@ func (*awsAwsjson11_serializeOpDisconnectCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -893,6 +988,8 @@ func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -906,6 +1003,10 @@ func (*awsAwsjson11_serializeOpEnableKey) ID() string { func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -948,6 +1049,8 @@ func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -961,6 +1064,10 @@ func (*awsAwsjson11_serializeOpEnableKeyRotation) ID() string { func (m *awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1003,6 +1110,8 @@ func (m 
*awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1016,6 +1125,10 @@ func (*awsAwsjson11_serializeOpEncrypt) ID() string { func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1058,6 +1171,8 @@ func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1071,6 +1186,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKey) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1113,6 +1232,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1126,6 +1247,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyPair) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1168,6 +1293,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1181,6 +1308,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) ID() string func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1223,6 +1354,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSeri } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } 
@@ -1236,6 +1369,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1278,6 +1415,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1291,6 +1430,10 @@ func (*awsAwsjson11_serializeOpGenerateMac) ID() string { func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1333,6 +1476,8 @@ func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1346,6 +1491,10 @@ func (*awsAwsjson11_serializeOpGenerateRandom) ID() string { func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1388,6 +1537,8 @@ func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1401,6 +1552,10 @@ func (*awsAwsjson11_serializeOpGetKeyPolicy) ID() string { func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1443,6 +1598,8 @@ func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1456,6 +1613,10 @@ func (*awsAwsjson11_serializeOpGetKeyRotationStatus) ID() string { func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1498,6 +1659,8 @@ func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1511,6 +1674,10 @@ func (*awsAwsjson11_serializeOpGetParametersForImport) ID() string { func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1553,6 +1720,8 @@ func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1566,6 +1735,10 @@ func (*awsAwsjson11_serializeOpGetPublicKey) ID() string { func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1608,6 +1781,8 @@ func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1621,6 +1796,10 @@ func (*awsAwsjson11_serializeOpImportKeyMaterial) ID() string { func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1663,6 +1842,8 @@ func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1676,6 +1857,10 @@ func (*awsAwsjson11_serializeOpListAliases) ID() string { func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1718,6 +1903,8 @@ func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1731,6 +1918,10 @@ func (*awsAwsjson11_serializeOpListGrants) ID() string { func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1773,6 +1964,8 @@ func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1786,6 +1979,10 @@ func (*awsAwsjson11_serializeOpListKeyPolicies) ID() string { func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1828,6 +2025,8 @@ func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1841,6 +2040,10 @@ func (*awsAwsjson11_serializeOpListKeyRotations) ID() string { func (m *awsAwsjson11_serializeOpListKeyRotations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1883,6 +2086,8 @@ func (m *awsAwsjson11_serializeOpListKeyRotations) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1896,6 +2101,10 @@ func (*awsAwsjson11_serializeOpListKeys) ID() string { func (m 
*awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1938,6 +2147,8 @@ func (m *awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1951,6 +2162,10 @@ func (*awsAwsjson11_serializeOpListResourceTags) ID() string { func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1993,6 +2208,8 @@ func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2006,6 +2223,10 @@ func (*awsAwsjson11_serializeOpListRetirableGrants) ID() string { func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2048,6 +2269,8 @@ func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2061,6 +2284,10 @@ func (*awsAwsjson11_serializeOpPutKeyPolicy) ID() string { func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2103,6 +2330,8 @@ func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2116,6 +2345,10 @@ func (*awsAwsjson11_serializeOpReEncrypt) ID() string { func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, 
err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2158,6 +2391,8 @@ func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2171,6 +2406,10 @@ func (*awsAwsjson11_serializeOpReplicateKey) ID() string { func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2213,6 +2452,8 @@ func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2226,6 +2467,10 @@ func (*awsAwsjson11_serializeOpRetireGrant) ID() string { func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2268,6 +2513,8 @@ func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2281,6 +2528,10 @@ func (*awsAwsjson11_serializeOpRevokeGrant) ID() string { func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2323,6 +2574,8 @@ func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2336,6 +2589,10 @@ func (*awsAwsjson11_serializeOpRotateKeyOnDemand) ID() string { func (m *awsAwsjson11_serializeOpRotateKeyOnDemand) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2378,6 +2635,8 @@ func (m *awsAwsjson11_serializeOpRotateKeyOnDemand) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2391,6 +2650,10 @@ func (*awsAwsjson11_serializeOpScheduleKeyDeletion) ID() string { func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2433,6 +2696,8 @@ func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2446,6 +2711,10 @@ func (*awsAwsjson11_serializeOpSign) ID() string { func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2488,6 +2757,8 @@ func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in m } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2501,6 +2772,10 @@ func (*awsAwsjson11_serializeOpTagResource) ID() string { func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2543,6 +2818,8 @@ func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2556,6 +2833,10 @@ func (*awsAwsjson11_serializeOpUntagResource) ID() string { func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2598,6 +2879,8 @@ func (m 
*awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2611,6 +2894,10 @@ func (*awsAwsjson11_serializeOpUpdateAlias) ID() string { func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2653,6 +2940,8 @@ func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2666,6 +2955,10 @@ func (*awsAwsjson11_serializeOpUpdateCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2708,6 +3001,8 @@ func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2721,6 +3016,10 @@ func (*awsAwsjson11_serializeOpUpdateKeyDescription) ID() string { func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2763,6 +3062,8 @@ func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2776,6 +3077,10 @@ func (*awsAwsjson11_serializeOpUpdatePrimaryRegion) ID() string { func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2818,6 +3123,8 @@ func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ 
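// Illustrative sketch, not part of the vendored diff: the instrumentation shape
// repeated in every KMS serializer hunk above — start a trace span and a duration
// timer on entry, end both just before handing off to the next handler, and keep
// deferred calls as a safety net for early error returns. The span/timer types
// below are hypothetical stand-ins for illustration, not the smithy-go API.
package main

import (
	"context"
	"fmt"
	"time"
)

type span struct {
	name string
	done bool
}

func startSpan(ctx context.Context, name string) (context.Context, *span) {
	return ctx, &span{name: name}
}

// End is idempotent, so the deferred call after an explicit End is harmless.
func (s *span) End() {
	if !s.done {
		s.done = true
		fmt.Println("span ended:", s.name)
	}
}

// startTimer returns a stop function that records the elapsed time once.
func startTimer(metric string) func() {
	start := time.Now()
	fired := false
	return func() {
		if !fired {
			fired = true
			fmt.Printf("%s = %s\n", metric, time.Since(start))
		}
	}
}

func handleSerialize(ctx context.Context, next func(context.Context) error) error {
	_, sp := startSpan(ctx, "OperationSerializer")
	endTimer := startTimer("client.call.serialization_duration")
	defer endTimer() // safety net if serialization fails and returns early
	defer sp.End()

	// ... request marshalling would happen here; an early error return would
	// still close the span and record the timer via the deferred calls ...

	endTimer() // stop measuring before the next handler runs
	sp.End()
	return next(ctx)
}

func main() {
	_ = handleSerialize(context.Background(), func(context.Context) error { return nil })
}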
-2831,6 +3138,10 @@ func (*awsAwsjson11_serializeOpVerify) ID() string { func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2873,6 +3184,8 @@ func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2886,6 +3199,10 @@ func (*awsAwsjson11_serializeOpVerifyMac) ID() string { func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2928,6 +3245,8 @@ func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsAwsjson11_serializeDocumentEncryptionContextType(v map[string]string, value smithyjson.Value) error { diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml deleted file mode 100644 index 47a6a46ec..000000000 --- a/vendor/github.com/cenkalti/backoff/v3/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore similarity index 94% rename from vendor/github.com/cenkalti/backoff/v3/.gitignore rename to vendor/github.com/cenkalti/backoff/v4/.gitignore index 00268614f..50d95c548 100644 --- a/vendor/github.com/cenkalti/backoff/v3/.gitignore +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -20,3 +20,6 @@ _cgo_export.* _testmain.go *.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/LICENSE rename to vendor/github.com/cenkalti/backoff/v4/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md similarity index 67% rename from vendor/github.com/cenkalti/backoff/v3/README.md rename to vendor/github.com/cenkalti/backoff/v4/README.md index 3673df487..9433004a2 100644 --- a/vendor/github.com/cenkalti/backoff/v3/README.md +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls 
image]][coveralls] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,10 +9,9 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. -godoc.org does not support modules yet, -so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation. +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. ## Contributing @@ -20,14 +19,12 @@ so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the docume * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. -[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master [coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master [coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/backoff.go rename to vendor/github.com/cenkalti/backoff/v4/backoff.go diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go similarity index 86% rename from vendor/github.com/cenkalti/backoff/v3/context.go rename to vendor/github.com/cenkalti/backoff/v4/context.go index fcff86c1b..48482330e 100644 --- a/vendor/github.com/cenkalti/backoff/v3/context.go +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -57,10 +57,6 @@ func (b *backOffContext) NextBackOff() time.Duration { case <-b.ctx.Done(): return Stop default: + return b.BackOff.NextBackOff() } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple - return Stop - } - return next } diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go similarity index 67% rename from vendor/github.com/cenkalti/backoff/v3/exponential.go rename to vendor/github.com/cenkalti/backoff/v4/exponential.go index cb11cc1d2..aac99f196 100644 --- a/vendor/github.com/cenkalti/backoff/v3/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -56,9 +56,10 @@ type ExponentialBackOff struct { RandomizationFactor float64 Multiplier float64 MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. + // After MaxElapsedTime the ExponentialBackOff returns Stop. // It never stops if MaxElapsedTime == 0. 
MaxElapsedTime time.Duration + Stop time.Duration Clock Clock currentInterval time.Duration @@ -70,6 +71,9 @@ type Clock interface { Now() time.Time } +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + // Default values for ExponentialBackOff. const ( DefaultInitialInterval = 500 * time.Millisecond @@ -80,19 +84,72 @@ const ( ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { b := &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, Multiplier: DefaultMultiplier, MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, Clock: SystemClock, } + for _, fn := range opts { + fn(b) + } b.Reset() return b } +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + type systemClock struct{} func (t systemClock) Now() time.Time { @@ -113,11 +170,13 @@ func (b *ExponentialBackOff) Reset() { // Randomized interval = RetryInterval * (1 ± RandomizationFactor) func (b *ExponentialBackOff) NextBackOff() time.Duration { // Make sure we have not gone over the maximum elapsed time. 
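// Illustrative sketch, not from the vendored files: constructing the v4
// ExponentialBackOff with the functional options added in the hunk above.
// The durations are arbitrary example values; the import path matches the
// github.com/cenkalti/backoff/v4 module referenced in the README change.
package main

import (
	"fmt"
	"time"

	backoff "github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	// First randomized interval, roughly InitialInterval ± RandomizationFactor.
	fmt.Println(b.NextBackOff())
}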
- if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + return next } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance @@ -141,8 +200,11 @@ func (b *ExponentialBackOff) incrementCurrentInterval() { } // Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } var delta = randomizationFactor * float64(currentInterval) var minInterval = float64(currentInterval) - delta var maxInterval = float64(currentInterval) + delta diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go similarity index 54% rename from vendor/github.com/cenkalti/backoff/v3/retry.go rename to vendor/github.com/cenkalti/backoff/v4/retry.go index 6c776ccf8..b9c0c51cd 100644 --- a/vendor/github.com/cenkalti/backoff/v3/retry.go +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -1,11 +1,24 @@ package backoff -import "time" +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) // An Operation is executing by Retry() or RetryNotify(). // The operation will be retried using a backoff policy if it returns an error. type Operation func() error +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + // Notify is a notify-on-error function. It receives an operation error and // backoff delay if the operation failed (with an error). // @@ -25,18 +38,41 @@ func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { return RetryNotifyWithTimer(operation, b, notify, nil) } +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer // for each failed attempt before sleep. // A default timer that uses system timer is used when nil is passed. 
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) if t == nil { t = &defaultTimer{} } @@ -49,16 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer b.Reset() for { - if err = operation(); err == nil { - return nil + res, err = operation() + if err == nil { + return res, nil } - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err } if next = b.NextBackOff(); next == Stop { - return err + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err } if notify != nil { @@ -69,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer select { case <-ctx.Done(): - return ctx.Err() + return res, ctx.Err() case <-t.C(): } } @@ -88,8 +130,16 @@ func (e *PermanentError) Unwrap() error { return e.Err } +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + // Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { +func Permanent(err error) error { + if err == nil { + return nil + } return &PermanentError{ Err: err, } diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go similarity index 97% rename from vendor/github.com/cenkalti/backoff/v3/ticker.go rename to vendor/github.com/cenkalti/backoff/v4/ticker.go index ed699e0e3..df9d68bce 100644 --- a/vendor/github.com/cenkalti/backoff/v3/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -33,6 +33,9 @@ func NewTicker(b BackOff) *Ticker { // NewTickerWithTimer returns a new Ticker with a custom timer. // A default timer that uses system timer is used when nil is passed. 
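// Illustrative sketch, not from the vendored files: the generics-based
// RetryWithData helper and the nil-safe Permanent wrapper introduced in the
// retry.go hunks above. fetchStatus and the URL are hypothetical placeholders.
package main

import (
	"errors"
	"fmt"
	"net/http"

	backoff "github.com/cenkalti/backoff/v4"
)

// fetchStatus returns the HTTP status code, retrying transient failures and
// giving up immediately on a 404 by wrapping the error with backoff.Permanent.
func fetchStatus(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err // transient: eligible for retry
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return 0, backoff.Permanent(errors.New("not found"))
	}
	return resp.StatusCode, nil
}

func main() {
	code, err := backoff.RetryWithData(func() (int, error) {
		return fetchStatus("https://example.com/healthz")
	}, backoff.NewExponentialBackOff())
	fmt.Println(code, err)
}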
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } c := make(chan time.Time) t := &Ticker{ C: c, diff --git a/vendor/github.com/cenkalti/backoff/v3/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/timer.go rename to vendor/github.com/cenkalti/backoff/v4/timer.go diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go similarity index 94% rename from vendor/github.com/cenkalti/backoff/v3/tries.go rename to vendor/github.com/cenkalti/backoff/v4/tries.go index cfeefd9b7..28d58ca37 100644 --- a/vendor/github.com/cenkalti/backoff/v3/tries.go +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -20,6 +20,9 @@ type backOffTries struct { } func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } if b.maxTries > 0 { if b.maxTries <= b.numTries { return Stop diff --git a/vendor/github.com/cncf/xds/go/LICENSE b/vendor/github.com/cncf/xds/go/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go new file mode 100644 index 000000000..0281b3ee5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go @@ -0,0 +1,411 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/migrate.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` +} + +func (x *MigrateAnnotation) Reset() { + *x = MigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateAnnotation) ProtoMessage() {} + +func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead. +func (*MigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{0} +} + +func (x *MigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +type FieldMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` + OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"` +} + +func (x *FieldMigrateAnnotation) Reset() { + *x = FieldMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMigrateAnnotation) ProtoMessage() {} + +func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{1} +} + +func (x *FieldMigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +func (x *FieldMigrateAnnotation) GetOneofPromotion() string { + if x != nil { + return x.OneofPromotion + } + return "" +} + +type FileMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"` +} + +func (x *FileMigrateAnnotation) Reset() { + *x = FileMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileMigrateAnnotation) ProtoMessage() {} + +func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead. +func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{2} +} + +func (x *FileMigrateAnnotation) GetMoveToPackage() string { + if x != nil { + return x.MoveToPackage + } + return "" +} + +var file_udpa_annotations_migrate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.message_migrate", + Tag: "bytes,171962766,opt,name=message_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldMigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.field_migrate", + Tag: "bytes,171962766,opt,name=field_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.enum_migrate", + Tag: "bytes,171962766,opt,name=enum_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.enum_value_migrate", + Tag: "bytes,171962766,opt,name=enum_value_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileMigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.file_migrate", + Tag: "bytes,171962766,opt,name=file_migrate", + Filename: "udpa/annotations/migrate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional udpa.annotations.MigrateAnnotation message_migrate = 171962766; + E_MessageMigrate = &file_udpa_annotations_migrate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional udpa.annotations.FieldMigrateAnnotation field_migrate = 171962766; + E_FieldMigrate = &file_udpa_annotations_migrate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // optional udpa.annotations.MigrateAnnotation enum_migrate = 171962766; + E_EnumMigrate = &file_udpa_annotations_migrate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional udpa.annotations.MigrateAnnotation enum_value_migrate = 171962766; + E_EnumValueMigrate = &file_udpa_annotations_migrate_proto_extTypes[3] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional udpa.annotations.FileMigrateAnnotation file_migrate = 171962766; + E_FileMigrate = &file_udpa_annotations_migrate_proto_extTypes[4] +) + +var File_udpa_annotations_migrate_proto protoreflect.FileDescriptor + +var file_udpa_annotations_migrate_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f, + 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3f, 0x0a, 0x15, + 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x74, 0x6f, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x3a, 0x70, 0x0a, + 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, + 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, + 0x6f, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x3a, 0x67, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x8e, + 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, + 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x77, 0x0a, 0x12, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, + 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x10, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x3a, 0x6b, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x75, 0x64, 0x70, 0x61, + 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, + 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_udpa_annotations_migrate_proto_rawDescOnce sync.Once + file_udpa_annotations_migrate_proto_rawDescData = file_udpa_annotations_migrate_proto_rawDesc +) + +func file_udpa_annotations_migrate_proto_rawDescGZIP() []byte { + file_udpa_annotations_migrate_proto_rawDescOnce.Do(func() { + file_udpa_annotations_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_migrate_proto_rawDescData) + }) + return file_udpa_annotations_migrate_proto_rawDescData +} + +var file_udpa_annotations_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_udpa_annotations_migrate_proto_goTypes = []interface{}{ + (*MigrateAnnotation)(nil), // 0: 
udpa.annotations.MigrateAnnotation + (*FieldMigrateAnnotation)(nil), // 1: udpa.annotations.FieldMigrateAnnotation + (*FileMigrateAnnotation)(nil), // 2: udpa.annotations.FileMigrateAnnotation + (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions + (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions + (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions +} +var file_udpa_annotations_migrate_proto_depIdxs = []int32{ + 3, // 0: udpa.annotations.message_migrate:extendee -> google.protobuf.MessageOptions + 4, // 1: udpa.annotations.field_migrate:extendee -> google.protobuf.FieldOptions + 5, // 2: udpa.annotations.enum_migrate:extendee -> google.protobuf.EnumOptions + 6, // 3: udpa.annotations.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions + 7, // 4: udpa.annotations.file_migrate:extendee -> google.protobuf.FileOptions + 0, // 5: udpa.annotations.message_migrate:type_name -> udpa.annotations.MigrateAnnotation + 1, // 6: udpa.annotations.field_migrate:type_name -> udpa.annotations.FieldMigrateAnnotation + 0, // 7: udpa.annotations.enum_migrate:type_name -> udpa.annotations.MigrateAnnotation + 0, // 8: udpa.annotations.enum_value_migrate:type_name -> udpa.annotations.MigrateAnnotation + 2, // 9: udpa.annotations.file_migrate:type_name -> udpa.annotations.FileMigrateAnnotation + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 5, // [5:10] is the sub-list for extension type_name + 0, // [0:5] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_migrate_proto_init() } +func file_udpa_annotations_migrate_proto_init() { + if File_udpa_annotations_migrate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_udpa_annotations_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_udpa_annotations_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_migrate_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 5, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_migrate_proto_goTypes, + DependencyIndexes: file_udpa_annotations_migrate_proto_depIdxs, + MessageInfos: file_udpa_annotations_migrate_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_migrate_proto_extTypes, + }.Build() + File_udpa_annotations_migrate_proto = out.File + file_udpa_annotations_migrate_proto_rawDesc = nil + file_udpa_annotations_migrate_proto_goTypes = nil + file_udpa_annotations_migrate_proto_depIdxs = nil 
+} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go new file mode 100644 index 000000000..38196d5eb --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go @@ -0,0 +1,350 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/annotations/migrate.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MigrateAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + + return nil +} + +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + +// MigrateAnnotationValidationError is the validation error returned by +// MigrateAnnotation.Validate if the designated constraints aren't met. +type MigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MigrateAnnotationValidationError) ErrorName() string { + return "MigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MigrateAnnotationValidationError{} + +// Validate checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + // no validation rules for OneofPromotion + + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FieldMigrateAnnotationValidationError is the validation error returned by +// FieldMigrateAnnotation.Validate if the designated constraints aren't met. +type FieldMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldMigrateAnnotationValidationError) ErrorName() string { + return "FieldMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldMigrateAnnotationValidationError{} + +// Validate checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MoveToPackage + + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FileMigrateAnnotationValidationError is the validation error returned by +// FileMigrateAnnotation.Validate if the designated constraints aren't met. +type FileMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileMigrateAnnotationValidationError) ErrorName() string { + return "FileMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileMigrateAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go new file mode 100644 index 000000000..cf858bd97 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go @@ -0,0 +1,196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/security.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FieldSecurityAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"` + ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"` +} + +func (x *FieldSecurityAnnotation) Reset() { + *x = FieldSecurityAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_security_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldSecurityAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldSecurityAnnotation) ProtoMessage() {} + +func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_security_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_security_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool { + if x != nil { + return x.ConfigureForUntrustedDownstream + } + return false +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool { + if x != nil { + return x.ConfigureForUntrustedUpstream + } + return false +} + +var file_udpa_annotations_security_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldSecurityAnnotation)(nil), + Field: 11122993, + Name: "udpa.annotations.security", + Tag: "bytes,11122993,opt,name=security", + Filename: "udpa/annotations/security.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional udpa.annotations.FieldSecurityAnnotation security = 11122993; + E_Security = &file_udpa_annotations_security_proto_extTypes[0] +) + +var File_udpa_annotations_security_proto protoreflect.FileDescriptor + +var file_udpa_annotations_security_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, + 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, + 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x67, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0xb1, 0xf2, 0xa6, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x75, 0x64, 0x70, + 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 
0x75, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, + 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_annotations_security_proto_rawDescOnce sync.Once + file_udpa_annotations_security_proto_rawDescData = file_udpa_annotations_security_proto_rawDesc +) + +func file_udpa_annotations_security_proto_rawDescGZIP() []byte { + file_udpa_annotations_security_proto_rawDescOnce.Do(func() { + file_udpa_annotations_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_security_proto_rawDescData) + }) + return file_udpa_annotations_security_proto_rawDescData +} + +var file_udpa_annotations_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_security_proto_goTypes = []interface{}{ + (*FieldSecurityAnnotation)(nil), // 0: udpa.annotations.FieldSecurityAnnotation + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_udpa_annotations_security_proto_depIdxs = []int32{ + 1, // 0: udpa.annotations.security:extendee -> google.protobuf.FieldOptions + 0, // 1: udpa.annotations.security:type_name -> udpa.annotations.FieldSecurityAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_security_proto_init() } +func file_udpa_annotations_security_proto_init() { + if File_udpa_annotations_security_proto != nil { + return + } + file_udpa_annotations_status_proto_init() + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldSecurityAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_security_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_security_proto_goTypes, + DependencyIndexes: file_udpa_annotations_security_proto_depIdxs, + MessageInfos: file_udpa_annotations_security_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_security_proto_extTypes, + }.Build() + File_udpa_annotations_security_proto = out.File + file_udpa_annotations_security_proto_rawDesc = nil + file_udpa_annotations_security_proto_goTypes = nil + file_udpa_annotations_security_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go new file mode 100644 index 000000000..acc9bd7a1 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/security.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FieldSecurityAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ConfigureForUntrustedDownstream + + // no validation rules for ConfigureForUntrustedUpstream + + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + + return nil +} + +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + +// FieldSecurityAnnotationValidationError is the validation error returned by +// FieldSecurityAnnotation.Validate if the designated constraints aren't met. +type FieldSecurityAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldSecurityAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldSecurityAnnotationValidationError) ErrorName() string { + return "FieldSecurityAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldSecurityAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldSecurityAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldSecurityAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldSecurityAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go new file mode 100644 index 000000000..2d5c78dc2 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/sensitive.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_udpa_annotations_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 76569463, + Name: "udpa.annotations.sensitive", + Tag: "varint,76569463,opt,name=sensitive", + Filename: "udpa/annotations/sensitive.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional bool sensitive = 76569463; + E_Sensitive = &file_udpa_annotations_sensitive_proto_extTypes[0] +) + +var File_udpa_annotations_sensitive_proto protoreflect.FileDescriptor + +var file_udpa_annotations_sensitive_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xf7, 0xb6, 0xc1, 0x24, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_udpa_annotations_sensitive_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions +} +var file_udpa_annotations_sensitive_proto_depIdxs = []int32{ + 0, // 0: udpa.annotations.sensitive:extendee -> google.protobuf.FieldOptions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_sensitive_proto_init() } +func file_udpa_annotations_sensitive_proto_init() { + if File_udpa_annotations_sensitive_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_sensitive_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_sensitive_proto_goTypes, + DependencyIndexes: file_udpa_annotations_sensitive_proto_depIdxs, + ExtensionInfos: file_udpa_annotations_sensitive_proto_extTypes, + }.Build() + File_udpa_annotations_sensitive_proto = out.File + file_udpa_annotations_sensitive_proto_rawDesc = nil + file_udpa_annotations_sensitive_proto_goTypes = nil + file_udpa_annotations_sensitive_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go new file mode 100644 index 000000000..f3fa61974 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go @@ -0,0 +1,36 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/sensitive.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go new file mode 100644 index 000000000..c96818b17 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go @@ -0,0 +1,253 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/status.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PackageVersionStatus int32 + +const ( + PackageVersionStatus_UNKNOWN PackageVersionStatus = 0 + PackageVersionStatus_FROZEN PackageVersionStatus = 1 + PackageVersionStatus_ACTIVE PackageVersionStatus = 2 + PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3 +) + +// Enum value maps for PackageVersionStatus. +var ( + PackageVersionStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FROZEN", + 2: "ACTIVE", + 3: "NEXT_MAJOR_VERSION_CANDIDATE", + } + PackageVersionStatus_value = map[string]int32{ + "UNKNOWN": 0, + "FROZEN": 1, + "ACTIVE": 2, + "NEXT_MAJOR_VERSION_CANDIDATE": 3, + } +) + +func (x PackageVersionStatus) Enum() *PackageVersionStatus { + p := new(PackageVersionStatus) + *p = x + return p +} + +func (x PackageVersionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_udpa_annotations_status_proto_enumTypes[0].Descriptor() +} + +func (PackageVersionStatus) Type() protoreflect.EnumType { + return &file_udpa_annotations_status_proto_enumTypes[0] +} + +func (x PackageVersionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PackageVersionStatus.Descriptor instead. 
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) { + return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0} +} + +type StatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` + PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=udpa.annotations.PackageVersionStatus" json:"package_version_status,omitempty"` +} + +func (x *StatusAnnotation) Reset() { + *x = StatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusAnnotation) ProtoMessage() {} + +func (x *StatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead. +func (*StatusAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0} +} + +func (x *StatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus { + if x != nil { + return x.PackageVersionStatus + } + return PackageVersionStatus_UNKNOWN +} + +var file_udpa_annotations_status_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*StatusAnnotation)(nil), + Field: 222707719, + Name: "udpa.annotations.file_status", + Tag: "bytes,222707719,opt,name=file_status", + Filename: "udpa/annotations/status.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. 
+var ( + // optional udpa.annotations.StatusAnnotation file_status = 222707719; + E_FileStatus = &file_udpa_annotations_status_proto_extTypes[0] +) + +var File_udpa_annotations_status_proto protoreflect.FileDescriptor + +var file_udpa_annotations_status_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, + 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a, + 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a, + 0x64, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x87, 0x80, 0x99, + 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_annotations_status_proto_rawDescOnce sync.Once + file_udpa_annotations_status_proto_rawDescData = file_udpa_annotations_status_proto_rawDesc +) + +func file_udpa_annotations_status_proto_rawDescGZIP() []byte { + 
file_udpa_annotations_status_proto_rawDescOnce.Do(func() { + file_udpa_annotations_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_status_proto_rawDescData) + }) + return file_udpa_annotations_status_proto_rawDescData +} + +var file_udpa_annotations_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_udpa_annotations_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_status_proto_goTypes = []interface{}{ + (PackageVersionStatus)(0), // 0: udpa.annotations.PackageVersionStatus + (*StatusAnnotation)(nil), // 1: udpa.annotations.StatusAnnotation + (*descriptorpb.FileOptions)(nil), // 2: google.protobuf.FileOptions +} +var file_udpa_annotations_status_proto_depIdxs = []int32{ + 0, // 0: udpa.annotations.StatusAnnotation.package_version_status:type_name -> udpa.annotations.PackageVersionStatus + 2, // 1: udpa.annotations.file_status:extendee -> google.protobuf.FileOptions + 1, // 2: udpa.annotations.file_status:type_name -> udpa.annotations.StatusAnnotation + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_status_proto_init() } +func file_udpa_annotations_status_proto_init() { + if File_udpa_annotations_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_status_proto_goTypes, + DependencyIndexes: file_udpa_annotations_status_proto_depIdxs, + EnumInfos: file_udpa_annotations_status_proto_enumTypes, + MessageInfos: file_udpa_annotations_status_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_status_proto_extTypes, + }.Build() + File_udpa_annotations_status_proto = out.File + file_udpa_annotations_status_proto_rawDesc = nil + file_udpa_annotations_status_proto_goTypes = nil + file_udpa_annotations_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go new file mode 100644 index 000000000..5633a8383 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/status.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StatusAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. +func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + // no validation rules for PackageVersionStatus + + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + + return nil +} + +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + +// StatusAnnotationValidationError is the validation error returned by +// StatusAnnotation.Validate if the designated constraints aren't met. +type StatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" } + +// Error satisfies the builtin error interface +func (e StatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go new file mode 100644 index 000000000..b3ab9e346 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/versioning.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VersioningAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"` +} + +func (x *VersioningAnnotation) Reset() { + *x = VersioningAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_versioning_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersioningAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersioningAnnotation) ProtoMessage() {} + +func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_versioning_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead. +func (*VersioningAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_versioning_proto_rawDescGZIP(), []int{0} +} + +func (x *VersioningAnnotation) GetPreviousMessageType() string { + if x != nil { + return x.PreviousMessageType + } + return "" +} + +var file_udpa_annotations_versioning_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*VersioningAnnotation)(nil), + Field: 7881811, + Name: "udpa.annotations.versioning", + Tag: "bytes,7881811,opt,name=versioning", + Filename: "udpa/annotations/versioning.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. 
+var ( + // optional udpa.annotations.VersioningAnnotation versioning = 7881811; + E_Versioning = &file_udpa_annotations_versioning_proto_extTypes[0] +) + +var File_udpa_annotations_versioning_proto protoreflect.FileDescriptor + +var file_udpa_annotations_versioning_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xd3, 0x88, 0xe1, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x75, 0x64, + 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, + 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_udpa_annotations_versioning_proto_rawDescOnce sync.Once + file_udpa_annotations_versioning_proto_rawDescData = file_udpa_annotations_versioning_proto_rawDesc +) + +func file_udpa_annotations_versioning_proto_rawDescGZIP() []byte { + file_udpa_annotations_versioning_proto_rawDescOnce.Do(func() { + file_udpa_annotations_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_versioning_proto_rawDescData) + }) + return file_udpa_annotations_versioning_proto_rawDescData +} + +var file_udpa_annotations_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_versioning_proto_goTypes = []interface{}{ + (*VersioningAnnotation)(nil), // 0: udpa.annotations.VersioningAnnotation + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_udpa_annotations_versioning_proto_depIdxs = []int32{ + 1, // 0: udpa.annotations.versioning:extendee -> google.protobuf.MessageOptions + 0, // 1: udpa.annotations.versioning:type_name -> udpa.annotations.VersioningAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] 
is the sub-list for field type_name +} + +func init() { file_udpa_annotations_versioning_proto_init() } +func file_udpa_annotations_versioning_proto_init() { + if File_udpa_annotations_versioning_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersioningAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_versioning_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_versioning_proto_goTypes, + DependencyIndexes: file_udpa_annotations_versioning_proto_depIdxs, + MessageInfos: file_udpa_annotations_versioning_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_versioning_proto_extTypes, + }.Build() + File_udpa_annotations_versioning_proto = out.File + file_udpa_annotations_versioning_proto_rawDesc = nil + file_udpa_annotations_versioning_proto_goTypes = nil + file_udpa_annotations_versioning_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go new file mode 100644 index 000000000..5fd86baff --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/annotations/versioning.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. +func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for PreviousMessageType + + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + + return nil +} + +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. +type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + +// VersioningAnnotationValidationError is the validation error returned by +// VersioningAnnotation.Validate if the designated constraints aren't met. +type VersioningAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VersioningAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VersioningAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VersioningAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VersioningAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VersioningAnnotationValidationError) ErrorName() string { + return "VersioningAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e VersioningAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVersioningAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VersioningAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VersioningAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go new file mode 100644 index 000000000..e8f23f785 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/type/v1/typed_struct.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypedStruct) Reset() { + *x = TypedStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedStruct) ProtoMessage() {} + +func (x *TypedStruct) ProtoReflect() protoreflect.Message { + mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead. +func (*TypedStruct) Descriptor() ([]byte, []int) { + return file_udpa_type_v1_typed_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedStruct) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *TypedStruct) GetValue() *structpb.Struct { + if x != nil { + return x.Value + } + return nil +} + +var File_udpa_type_v1_typed_struct_proto protoreflect.FileDescriptor + +var file_udpa_type_v1_typed_struct_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, + 0x0b, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_type_v1_typed_struct_proto_rawDescOnce sync.Once + file_udpa_type_v1_typed_struct_proto_rawDescData = file_udpa_type_v1_typed_struct_proto_rawDesc +) + +func file_udpa_type_v1_typed_struct_proto_rawDescGZIP() []byte { + file_udpa_type_v1_typed_struct_proto_rawDescOnce.Do(func() { + 
file_udpa_type_v1_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_type_v1_typed_struct_proto_rawDescData) + }) + return file_udpa_type_v1_typed_struct_proto_rawDescData +} + +var file_udpa_type_v1_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_type_v1_typed_struct_proto_goTypes = []interface{}{ + (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_udpa_type_v1_typed_struct_proto_depIdxs = []int32{ + 1, // 0: udpa.type.v1.TypedStruct.value:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_udpa_type_v1_typed_struct_proto_init() } +func file_udpa_type_v1_typed_struct_proto_init() { + if File_udpa_type_v1_typed_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_type_v1_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_type_v1_typed_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_udpa_type_v1_typed_struct_proto_goTypes, + DependencyIndexes: file_udpa_type_v1_typed_struct_proto_depIdxs, + MessageInfos: file_udpa_type_v1_typed_struct_proto_msgTypes, + }.Build() + File_udpa_type_v1_typed_struct_proto = out.File + file_udpa_type_v1_typed_struct_proto_rawDesc = nil + file_udpa_type_v1_typed_struct_proto_goTypes = nil + file_udpa_type_v1_typed_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go new file mode 100644 index 000000000..e336fb4a7 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/type/v1/typed_struct.proto + +package v1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TypedStruct) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TypedStructMultiError, or +// nil if none found. 
+func (m *TypedStruct) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedStruct) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TypedStructMultiError(errors) + } + + return nil +} + +// TypedStructMultiError is an error wrapping multiple validation errors +// returned by TypedStruct.ValidateAll() if the designated constraints aren't met. +type TypedStructMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedStructMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedStructMultiError) AllErrors() []error { return m } + +// TypedStructValidationError is the validation error returned by +// TypedStruct.Validate if the designated constraints aren't met. +type TypedStructValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedStructValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedStructValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedStructValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedStructValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" } + +// Error satisfies the builtin error interface +func (e TypedStructValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedStruct.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedStructValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedStructValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go new file mode 100644 index 000000000..705a71e88 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go @@ -0,0 +1,412 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/migrate.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` +} + +func (x *MigrateAnnotation) Reset() { + *x = MigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateAnnotation) ProtoMessage() {} + +func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead. +func (*MigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{0} +} + +func (x *MigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +type FieldMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` + OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"` +} + +func (x *FieldMigrateAnnotation) Reset() { + *x = FieldMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMigrateAnnotation) ProtoMessage() {} + +func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{1} +} + +func (x *FieldMigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +func (x *FieldMigrateAnnotation) GetOneofPromotion() string { + if x != nil { + return x.OneofPromotion + } + return "" +} + +type FileMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"` +} + +func (x *FileMigrateAnnotation) Reset() { + *x = FileMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileMigrateAnnotation) ProtoMessage() {} + +func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead. +func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{2} +} + +func (x *FileMigrateAnnotation) GetMoveToPackage() string { + if x != nil { + return x.MoveToPackage + } + return "" +} + +var file_xds_annotations_v3_migrate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.message_migrate", + Tag: "bytes,112948430,opt,name=message_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldMigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.field_migrate", + Tag: "bytes,112948430,opt,name=field_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.enum_migrate", + Tag: "bytes,112948430,opt,name=enum_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.enum_value_migrate", + Tag: "bytes,112948430,opt,name=enum_value_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileMigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.file_migrate", + Tag: "bytes,112948430,opt,name=file_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation message_migrate = 112948430; + E_MessageMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional xds.annotations.v3.FieldMigrateAnnotation field_migrate = 112948430; + E_FieldMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation enum_migrate = 112948430; + E_EnumMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation enum_value_migrate = 112948430; + E_EnumValueMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[3] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional xds.annotations.v3.FileMigrateAnnotation file_migrate = 112948430; + E_FileMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[4] +) + +var File_xds_annotations_v3_migrate_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_migrate_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x3f, 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x3a, 0x72, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x71, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x69, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x3a, 0x79, 0x0a, 0x12, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, + 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x6e, + 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x6d, + 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, + 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, 0x2b, 0x5a, + 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_migrate_proto_rawDescOnce sync.Once + file_xds_annotations_v3_migrate_proto_rawDescData = file_xds_annotations_v3_migrate_proto_rawDesc +) + +func file_xds_annotations_v3_migrate_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_migrate_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_migrate_proto_rawDescData) + }) + return file_xds_annotations_v3_migrate_proto_rawDescData +} + +var file_xds_annotations_v3_migrate_proto_msgTypes = 
make([]protoimpl.MessageInfo, 3) +var file_xds_annotations_v3_migrate_proto_goTypes = []interface{}{ + (*MigrateAnnotation)(nil), // 0: xds.annotations.v3.MigrateAnnotation + (*FieldMigrateAnnotation)(nil), // 1: xds.annotations.v3.FieldMigrateAnnotation + (*FileMigrateAnnotation)(nil), // 2: xds.annotations.v3.FileMigrateAnnotation + (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions + (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions + (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions +} +var file_xds_annotations_v3_migrate_proto_depIdxs = []int32{ + 3, // 0: xds.annotations.v3.message_migrate:extendee -> google.protobuf.MessageOptions + 4, // 1: xds.annotations.v3.field_migrate:extendee -> google.protobuf.FieldOptions + 5, // 2: xds.annotations.v3.enum_migrate:extendee -> google.protobuf.EnumOptions + 6, // 3: xds.annotations.v3.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions + 7, // 4: xds.annotations.v3.file_migrate:extendee -> google.protobuf.FileOptions + 0, // 5: xds.annotations.v3.message_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 1, // 6: xds.annotations.v3.field_migrate:type_name -> xds.annotations.v3.FieldMigrateAnnotation + 0, // 7: xds.annotations.v3.enum_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 0, // 8: xds.annotations.v3.enum_value_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 2, // 9: xds.annotations.v3.file_migrate:type_name -> xds.annotations.v3.FileMigrateAnnotation + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 5, // [5:10] is the sub-list for extension type_name + 0, // [0:5] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_migrate_proto_init() } +func file_xds_annotations_v3_migrate_proto_init() { + if File_xds_annotations_v3_migrate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_migrate_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 5, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_migrate_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_migrate_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_migrate_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_migrate_proto_extTypes, + }.Build() + 
File_xds_annotations_v3_migrate_proto = out.File + file_xds_annotations_v3_migrate_proto_rawDesc = nil + file_xds_annotations_v3_migrate_proto_goTypes = nil + file_xds_annotations_v3_migrate_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go new file mode 100644 index 000000000..d57d77824 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go @@ -0,0 +1,350 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/migrate.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MigrateAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + + return nil +} + +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + +// MigrateAnnotationValidationError is the validation error returned by +// MigrateAnnotation.Validate if the designated constraints aren't met. +type MigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MigrateAnnotationValidationError) ErrorName() string { + return "MigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MigrateAnnotationValidationError{} + +// Validate checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + // no validation rules for OneofPromotion + + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FieldMigrateAnnotationValidationError is the validation error returned by +// FieldMigrateAnnotation.Validate if the designated constraints aren't met. +type FieldMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldMigrateAnnotationValidationError) ErrorName() string { + return "FieldMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldMigrateAnnotationValidationError{} + +// Validate checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MoveToPackage + + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FileMigrateAnnotationValidationError is the validation error returned by +// FileMigrateAnnotation.Validate if the designated constraints aren't met. +type FileMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileMigrateAnnotationValidationError) ErrorName() string { + return "FileMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileMigrateAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go new file mode 100644 index 000000000..0278e5165 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/security.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FieldSecurityAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"` + ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"` +} + +func (x *FieldSecurityAnnotation) Reset() { + *x = FieldSecurityAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_security_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldSecurityAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldSecurityAnnotation) ProtoMessage() {} + +func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_security_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_security_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool { + if x != nil { + return x.ConfigureForUntrustedDownstream + } + return false +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool { + if x != nil { + return x.ConfigureForUntrustedUpstream + } + return false +} + +var file_xds_annotations_v3_security_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldSecurityAnnotation)(nil), + Field: 99044135, + Name: "xds.annotations.v3.security", + Tag: "bytes,99044135,opt,name=security", + Filename: "xds/annotations/v3/security.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional xds.annotations.v3.FieldSecurityAnnotation security = 99044135; + E_Security = &file_xds_annotations_v3_security_proto_extTypes[0] +) + +var File_xds_annotations_v3_security_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_security_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x69, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa7, 0x96, 0x9d, 0x2f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, + 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_security_proto_rawDescOnce sync.Once + file_xds_annotations_v3_security_proto_rawDescData = file_xds_annotations_v3_security_proto_rawDesc +) + +func file_xds_annotations_v3_security_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_security_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_security_proto_rawDescData) + }) + return file_xds_annotations_v3_security_proto_rawDescData +} + +var file_xds_annotations_v3_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_annotations_v3_security_proto_goTypes = []interface{}{ + (*FieldSecurityAnnotation)(nil), // 0: xds.annotations.v3.FieldSecurityAnnotation + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_security_proto_depIdxs = []int32{ + 1, // 0: xds.annotations.v3.security:extendee -> google.protobuf.FieldOptions + 0, // 1: xds.annotations.v3.security:type_name -> xds.annotations.v3.FieldSecurityAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_security_proto_init() } +func file_xds_annotations_v3_security_proto_init() { + if File_xds_annotations_v3_security_proto != nil { + return + } + file_xds_annotations_v3_status_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldSecurityAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_security_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_security_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_security_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_security_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_security_proto_extTypes, + }.Build() + File_xds_annotations_v3_security_proto = out.File + file_xds_annotations_v3_security_proto_rawDesc = nil + file_xds_annotations_v3_security_proto_goTypes = nil + file_xds_annotations_v3_security_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go new file mode 100644 index 000000000..ac0143f27 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/annotations/v3/security.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FieldSecurityAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ConfigureForUntrustedDownstream + + // no validation rules for ConfigureForUntrustedUpstream + + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + + return nil +} + +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + +// FieldSecurityAnnotationValidationError is the validation error returned by +// FieldSecurityAnnotation.Validate if the designated constraints aren't met. +type FieldSecurityAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldSecurityAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldSecurityAnnotationValidationError) ErrorName() string { + return "FieldSecurityAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldSecurityAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldSecurityAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldSecurityAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldSecurityAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go new file mode 100644 index 000000000..57161aab4 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/sensitive.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_xds_annotations_v3_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 61008053, + Name: "xds.annotations.v3.sensitive", + Tag: "varint,61008053,opt,name=sensitive", + Filename: "xds/annotations/v3/sensitive.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional bool sensitive = 61008053; + E_Sensitive = &file_xds_annotations_v3_sensitive_proto_extTypes[0] +) + +var File_xds_annotations_v3_sensitive_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_sensitive_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb5, 0xd1, 0x8b, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_xds_annotations_v3_sensitive_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_sensitive_proto_depIdxs = []int32{ + 0, // 0: xds.annotations.v3.sensitive:extendee -> google.protobuf.FieldOptions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_sensitive_proto_init() } +func file_xds_annotations_v3_sensitive_proto_init() { + if File_xds_annotations_v3_sensitive_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_sensitive_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_sensitive_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_sensitive_proto_depIdxs, + ExtensionInfos: file_xds_annotations_v3_sensitive_proto_extTypes, + }.Build() + File_xds_annotations_v3_sensitive_proto = out.File + file_xds_annotations_v3_sensitive_proto_rawDesc = nil + file_xds_annotations_v3_sensitive_proto_goTypes = nil + file_xds_annotations_v3_sensitive_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go new file mode 100644 index 000000000..c101d3acc --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go @@ -0,0 +1,36 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/annotations/v3/sensitive.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go new file mode 100644 index 000000000..255d109fc --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go @@ -0,0 +1,495 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/status.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PackageVersionStatus int32 + +const ( + PackageVersionStatus_UNKNOWN PackageVersionStatus = 0 + PackageVersionStatus_FROZEN PackageVersionStatus = 1 + PackageVersionStatus_ACTIVE PackageVersionStatus = 2 + PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3 +) + +// Enum value maps for PackageVersionStatus. +var ( + PackageVersionStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FROZEN", + 2: "ACTIVE", + 3: "NEXT_MAJOR_VERSION_CANDIDATE", + } + PackageVersionStatus_value = map[string]int32{ + "UNKNOWN": 0, + "FROZEN": 1, + "ACTIVE": 2, + "NEXT_MAJOR_VERSION_CANDIDATE": 3, + } +) + +func (x PackageVersionStatus) Enum() *PackageVersionStatus { + p := new(PackageVersionStatus) + *p = x + return p +} + +func (x PackageVersionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_xds_annotations_v3_status_proto_enumTypes[0].Descriptor() +} + +func (PackageVersionStatus) Type() protoreflect.EnumType { + return &file_xds_annotations_v3_status_proto_enumTypes[0] +} + +func (x PackageVersionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PackageVersionStatus.Descriptor instead. 
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0} +} + +type FileStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *FileStatusAnnotation) Reset() { + *x = FileStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatusAnnotation) ProtoMessage() {} + +func (x *FileStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatusAnnotation.ProtoReflect.Descriptor instead. +func (*FileStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0} +} + +func (x *FileStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type MessageStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *MessageStatusAnnotation) Reset() { + *x = MessageStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageStatusAnnotation) ProtoMessage() {} + +func (x *MessageStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageStatusAnnotation.ProtoReflect.Descriptor instead. 
+func (*MessageStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type FieldStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *FieldStatusAnnotation) Reset() { + *x = FieldStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldStatusAnnotation) ProtoMessage() {} + +func (x *FieldStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldStatusAnnotation.ProtoReflect.Descriptor instead. +func (*FieldStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{2} +} + +func (x *FieldStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type StatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` + PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=xds.annotations.v3.PackageVersionStatus" json:"package_version_status,omitempty"` +} + +func (x *StatusAnnotation) Reset() { + *x = StatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusAnnotation) ProtoMessage() {} + +func (x *StatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead. 
+func (*StatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus { + if x != nil { + return x.PackageVersionStatus + } + return PackageVersionStatus_UNKNOWN +} + +var file_xds_annotations_v3_status_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.file_status", + Tag: "bytes,226829418,opt,name=file_status", + Filename: "xds/annotations/v3/status.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MessageStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.message_status", + Tag: "bytes,226829418,opt,name=message_status", + Filename: "xds/annotations/v3/status.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.field_status", + Tag: "bytes,226829418,opt,name=field_status", + Filename: "xds/annotations/v3/status.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional xds.annotations.v3.FileStatusAnnotation file_status = 226829418; + E_FileStatus = &file_xds_annotations_v3_status_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional xds.annotations.v3.MessageStatusAnnotation message_status = 226829418; + E_MessageStatus = &file_xds_annotations_v3_status_proto_extTypes[1] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional xds.annotations.v3.FieldStatusAnnotation field_status = 226829418; + E_FieldStatus = &file_xds_annotations_v3_status_proto_extTypes[2] +) + +var File_xds_annotations_v3_status_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_status_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x40, 0x0a, 0x14, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x43, 0x0a, 0x17, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x41, + 0x0a, 0x15, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x5e, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a, + 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 
0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a, + 0x6a, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, + 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x76, 0x0a, 0x0e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, + 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x3a, 0x6e, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_status_proto_rawDescOnce sync.Once + file_xds_annotations_v3_status_proto_rawDescData = file_xds_annotations_v3_status_proto_rawDesc +) + +func file_xds_annotations_v3_status_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_status_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_status_proto_rawDescData) + }) + return file_xds_annotations_v3_status_proto_rawDescData +} + +var file_xds_annotations_v3_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_xds_annotations_v3_status_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_xds_annotations_v3_status_proto_goTypes = []interface{}{ + (PackageVersionStatus)(0), // 0: xds.annotations.v3.PackageVersionStatus + (*FileStatusAnnotation)(nil), // 1: xds.annotations.v3.FileStatusAnnotation + (*MessageStatusAnnotation)(nil), // 2: xds.annotations.v3.MessageStatusAnnotation + (*FieldStatusAnnotation)(nil), // 3: xds.annotations.v3.FieldStatusAnnotation + 
(*StatusAnnotation)(nil), // 4: xds.annotations.v3.StatusAnnotation + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 7: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_status_proto_depIdxs = []int32{ + 0, // 0: xds.annotations.v3.StatusAnnotation.package_version_status:type_name -> xds.annotations.v3.PackageVersionStatus + 5, // 1: xds.annotations.v3.file_status:extendee -> google.protobuf.FileOptions + 6, // 2: xds.annotations.v3.message_status:extendee -> google.protobuf.MessageOptions + 7, // 3: xds.annotations.v3.field_status:extendee -> google.protobuf.FieldOptions + 1, // 4: xds.annotations.v3.file_status:type_name -> xds.annotations.v3.FileStatusAnnotation + 2, // 5: xds.annotations.v3.message_status:type_name -> xds.annotations.v3.MessageStatusAnnotation + 3, // 6: xds.annotations.v3.field_status:type_name -> xds.annotations.v3.FieldStatusAnnotation + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 4, // [4:7] is the sub-list for extension type_name + 1, // [1:4] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_status_proto_init() } +func file_xds_annotations_v3_status_proto_init() { + if File_xds_annotations_v3_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_status_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_status_proto_depIdxs, + EnumInfos: file_xds_annotations_v3_status_proto_enumTypes, + MessageInfos: file_xds_annotations_v3_status_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_status_proto_extTypes, + }.Build() + File_xds_annotations_v3_status_proto = out.File + file_xds_annotations_v3_status_proto_rawDesc = nil + file_xds_annotations_v3_status_proto_goTypes = nil + file_xds_annotations_v3_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go 
b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go new file mode 100644 index 000000000..a87dbee8d --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go @@ -0,0 +1,452 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/status.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FileStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileStatusAnnotationMultiError, or nil if none found. +func (m *FileStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return FileStatusAnnotationMultiError(errors) + } + + return nil +} + +// FileStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileStatusAnnotationMultiError) AllErrors() []error { return m } + +// FileStatusAnnotationValidationError is the validation error returned by +// FileStatusAnnotation.Validate if the designated constraints aren't met. +type FileStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileStatusAnnotationValidationError) ErrorName() string { + return "FileStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileStatusAnnotationValidationError{} + +// Validate checks the field values on MessageStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MessageStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MessageStatusAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MessageStatusAnnotationMultiError, or nil if none found. +func (m *MessageStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MessageStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return MessageStatusAnnotationMultiError(errors) + } + + return nil +} + +// MessageStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by MessageStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type MessageStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MessageStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m } + +// MessageStatusAnnotationValidationError is the validation error returned by +// MessageStatusAnnotation.Validate if the designated constraints aren't met. +type MessageStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MessageStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MessageStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MessageStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MessageStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MessageStatusAnnotationValidationError) ErrorName() string { + return "MessageStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MessageStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMessageStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MessageStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MessageStatusAnnotationValidationError{} + +// Validate checks the field values on FieldStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldStatusAnnotationMultiError, or nil if none found. +func (m *FieldStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return FieldStatusAnnotationMultiError(errors) + } + + return nil +} + +// FieldStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m } + +// FieldStatusAnnotationValidationError is the validation error returned by +// FieldStatusAnnotation.Validate if the designated constraints aren't met. +type FieldStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldStatusAnnotationValidationError) ErrorName() string { + return "FieldStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldStatusAnnotationValidationError{} + +// Validate checks the field values on StatusAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. +func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + // no validation rules for PackageVersionStatus + + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + + return nil +} + +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + +// StatusAnnotationValidationError is the validation error returned by +// StatusAnnotation.Validate if the designated constraints aren't met. +type StatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" } + +// Error satisfies the builtin error interface +func (e StatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go new file mode 100644 index 000000000..2de032f15 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/versioning.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VersioningAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"` +} + +func (x *VersioningAnnotation) Reset() { + *x = VersioningAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersioningAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersioningAnnotation) ProtoMessage() {} + +func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead. +func (*VersioningAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_versioning_proto_rawDescGZIP(), []int{0} +} + +func (x *VersioningAnnotation) GetPreviousMessageType() string { + if x != nil { + return x.PreviousMessageType + } + return "" +} + +var file_xds_annotations_v3_versioning_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*VersioningAnnotation)(nil), + Field: 92389011, + Name: "xds.annotations.v3.versioning", + Tag: "bytes,92389011,opt,name=versioning", + Filename: "xds/annotations/v3/versioning.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. 
+var ( + // optional xds.annotations.v3.VersioningAnnotation versioning = 92389011; + E_Versioning = &file_xds_annotations_v3_versioning_proto_extTypes[0] +) + +var File_xds_annotations_v3_versioning_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_versioning_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x93, 0xfd, 0x86, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_versioning_proto_rawDescOnce sync.Once + file_xds_annotations_v3_versioning_proto_rawDescData = file_xds_annotations_v3_versioning_proto_rawDesc +) + +func file_xds_annotations_v3_versioning_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_versioning_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_versioning_proto_rawDescData) + }) + return file_xds_annotations_v3_versioning_proto_rawDescData +} + +var file_xds_annotations_v3_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_annotations_v3_versioning_proto_goTypes = []interface{}{ + (*VersioningAnnotation)(nil), // 0: xds.annotations.v3.VersioningAnnotation + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_xds_annotations_v3_versioning_proto_depIdxs = []int32{ + 1, // 0: xds.annotations.v3.versioning:extendee -> google.protobuf.MessageOptions + 0, // 1: xds.annotations.v3.versioning:type_name -> xds.annotations.v3.VersioningAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for 
extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_versioning_proto_init() } +func file_xds_annotations_v3_versioning_proto_init() { + if File_xds_annotations_v3_versioning_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersioningAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_versioning_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_versioning_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_versioning_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_versioning_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_versioning_proto_extTypes, + }.Build() + File_xds_annotations_v3_versioning_proto = out.File + file_xds_annotations_v3_versioning_proto_rawDesc = nil + file_xds_annotations_v3_versioning_proto_goTypes = nil + file_xds_annotations_v3_versioning_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go new file mode 100644 index 000000000..042c266e1 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/versioning.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. +func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for PreviousMessageType + + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + + return nil +} + +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. 
+type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + +// VersioningAnnotationValidationError is the validation error returned by +// VersioningAnnotation.Validate if the designated constraints aren't met. +type VersioningAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VersioningAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VersioningAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VersioningAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VersioningAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VersioningAnnotationValidationError) ErrorName() string { + return "VersioningAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e VersioningAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVersioningAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VersioningAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VersioningAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go new file mode 100644 index 000000000..3058286d5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/authority.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Authority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Authority) Reset() { + *x = Authority{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_authority_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Authority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Authority) ProtoMessage() {} + +func (x *Authority) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_authority_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Authority.ProtoReflect.Descriptor instead. +func (*Authority) Descriptor() ([]byte, []int) { + return file_xds_core_v3_authority_proto_rawDescGZIP(), []int{0} +} + +func (x *Authority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_xds_core_v3_authority_proto protoreflect.FileDescriptor + +var file_xds_core_v3_authority_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_authority_proto_rawDescOnce sync.Once + file_xds_core_v3_authority_proto_rawDescData = file_xds_core_v3_authority_proto_rawDesc +) + +func file_xds_core_v3_authority_proto_rawDescGZIP() []byte { + file_xds_core_v3_authority_proto_rawDescOnce.Do(func() { + file_xds_core_v3_authority_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_authority_proto_rawDescData) + }) + return file_xds_core_v3_authority_proto_rawDescData +} + +var file_xds_core_v3_authority_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_authority_proto_goTypes = []interface{}{ + (*Authority)(nil), // 0: xds.core.v3.Authority +} +var file_xds_core_v3_authority_proto_depIdxs = []int32{ + 0, // [0:0] is the 
sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_authority_proto_init() } +func file_xds_core_v3_authority_proto_init() { + if File_xds_core_v3_authority_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_authority_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Authority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_authority_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_authority_proto_goTypes, + DependencyIndexes: file_xds_core_v3_authority_proto_depIdxs, + MessageInfos: file_xds_core_v3_authority_proto_msgTypes, + }.Build() + File_xds_core_v3_authority_proto = out.File + file_xds_core_v3_authority_proto_rawDesc = nil + file_xds_core_v3_authority_proto_goTypes = nil + file_xds_core_v3_authority_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go new file mode 100644 index 000000000..94317c2af --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/authority.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Authority with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Authority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Authority with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AuthorityMultiError, or nil +// if none found. +func (m *Authority) ValidateAll() error { + return m.validate(true) +} + +func (m *Authority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := AuthorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AuthorityMultiError(errors) + } + + return nil +} + +// AuthorityMultiError is an error wrapping multiple validation errors returned +// by Authority.ValidateAll() if the designated constraints aren't met. +type AuthorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m AuthorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AuthorityMultiError) AllErrors() []error { return m } + +// AuthorityValidationError is the validation error returned by +// Authority.Validate if the designated constraints aren't met. +type AuthorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AuthorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AuthorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AuthorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AuthorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AuthorityValidationError) ErrorName() string { return "AuthorityValidationError" } + +// Error satisfies the builtin error interface +func (e AuthorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAuthority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AuthorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AuthorityValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go new file mode 100644 index 000000000..0e339b589 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go @@ -0,0 +1,172 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/cidr.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CidrRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` +} + +func (x *CidrRange) Reset() { + *x = CidrRange{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_cidr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CidrRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CidrRange) ProtoMessage() {} + +func (x *CidrRange) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_cidr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. +func (*CidrRange) Descriptor() ([]byte, []int) { + return file_xds_core_v3_cidr_proto_rawDescGZIP(), []int{0} +} + +func (x *CidrRange) GetAddressPrefix() string { + if x != nil { + return x.AddressPrefix + } + return "" +} + +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.PrefixLen + } + return nil +} + +var File_xds_core_v3_cidr_proto protoreflect.FileDescriptor + +var file_xds_core_v3_cidr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, + 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, + 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, + 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 
0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_cidr_proto_rawDescOnce sync.Once + file_xds_core_v3_cidr_proto_rawDescData = file_xds_core_v3_cidr_proto_rawDesc +) + +func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte { + file_xds_core_v3_cidr_proto_rawDescOnce.Do(func() { + file_xds_core_v3_cidr_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_cidr_proto_rawDescData) + }) + return file_xds_core_v3_cidr_proto_rawDescData +} + +var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_cidr_proto_goTypes = []interface{}{ + (*CidrRange)(nil), // 0: xds.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value +} +var file_xds_core_v3_cidr_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_cidr_proto_init() } +func file_xds_core_v3_cidr_proto_init() { + if File_xds_core_v3_cidr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_cidr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CidrRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_cidr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_cidr_proto_goTypes, + DependencyIndexes: file_xds_core_v3_cidr_proto_depIdxs, + MessageInfos: file_xds_core_v3_cidr_proto_msgTypes, + }.Build() + File_xds_core_v3_cidr_proto = out.File + file_xds_core_v3_cidr_proto_rawDesc = nil + file_xds_core_v3_cidr_proto_goTypes = nil + file_xds_core_v3_cidr_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go new file mode 100644 index 000000000..43327f56b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/cidr.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CidrRange with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *CidrRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CidrRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CidrRangeMultiError, or nil +// if none found. +func (m *CidrRange) ValidateAll() error { + return m.validate(true) +} + +func (m *CidrRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 { + err := CidrRangeValidationError{ + field: "AddressPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetPrefixLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := CidrRangeValidationError{ + field: "PrefixLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CidrRangeMultiError(errors) + } + + return nil +} + +// CidrRangeMultiError is an error wrapping multiple validation errors returned +// by CidrRange.ValidateAll() if the designated constraints aren't met. +type CidrRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CidrRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CidrRangeMultiError) AllErrors() []error { return m } + +// CidrRangeValidationError is the validation error returned by +// CidrRange.Validate if the designated constraints aren't met. +type CidrRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CidrRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CidrRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CidrRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CidrRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" } + +// Error satisfies the builtin error interface +func (e CidrRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCidrRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CidrRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CidrRangeValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go new file mode 100644 index 000000000..0d45b961b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go @@ -0,0 +1,297 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/collection_entry.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CollectionEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ResourceSpecifier: + // + // *CollectionEntry_Locator + // *CollectionEntry_InlineEntry_ + ResourceSpecifier isCollectionEntry_ResourceSpecifier `protobuf_oneof:"resource_specifier"` +} + +func (x *CollectionEntry) Reset() { + *x = CollectionEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionEntry) ProtoMessage() {} + +func (x *CollectionEntry) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionEntry.ProtoReflect.Descriptor instead. 
+func (*CollectionEntry) Descriptor() ([]byte, []int) { + return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0} +} + +func (m *CollectionEntry) GetResourceSpecifier() isCollectionEntry_ResourceSpecifier { + if m != nil { + return m.ResourceSpecifier + } + return nil +} + +func (x *CollectionEntry) GetLocator() *ResourceLocator { + if x, ok := x.GetResourceSpecifier().(*CollectionEntry_Locator); ok { + return x.Locator + } + return nil +} + +func (x *CollectionEntry) GetInlineEntry() *CollectionEntry_InlineEntry { + if x, ok := x.GetResourceSpecifier().(*CollectionEntry_InlineEntry_); ok { + return x.InlineEntry + } + return nil +} + +type isCollectionEntry_ResourceSpecifier interface { + isCollectionEntry_ResourceSpecifier() +} + +type CollectionEntry_Locator struct { + Locator *ResourceLocator `protobuf:"bytes,1,opt,name=locator,proto3,oneof"` +} + +type CollectionEntry_InlineEntry_ struct { + InlineEntry *CollectionEntry_InlineEntry `protobuf:"bytes,2,opt,name=inline_entry,json=inlineEntry,proto3,oneof"` +} + +func (*CollectionEntry_Locator) isCollectionEntry_ResourceSpecifier() {} + +func (*CollectionEntry_InlineEntry_) isCollectionEntry_ResourceSpecifier() {} + +type CollectionEntry_InlineEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *CollectionEntry_InlineEntry) Reset() { + *x = CollectionEntry_InlineEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionEntry_InlineEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionEntry_InlineEntry) ProtoMessage() {} + +func (x *CollectionEntry_InlineEntry) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionEntry_InlineEntry.ProtoReflect.Descriptor instead. 
+func (*CollectionEntry_InlineEntry) Descriptor() ([]byte, []int) { + return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CollectionEntry_InlineEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CollectionEntry_InlineEntry) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +var File_xds_core_v3_collection_entry_proto protoreflect.FileDescriptor + +var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, + 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, + 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x02, 0x0a, 0x0f, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, + 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, + 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x8b, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x32, 0x15, 0x5e, 0x5b, + 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x7e, 0x3a, + 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_collection_entry_proto_rawDescOnce sync.Once + file_xds_core_v3_collection_entry_proto_rawDescData = file_xds_core_v3_collection_entry_proto_rawDesc +) + +func file_xds_core_v3_collection_entry_proto_rawDescGZIP() []byte { + file_xds_core_v3_collection_entry_proto_rawDescOnce.Do(func() { + file_xds_core_v3_collection_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_collection_entry_proto_rawDescData) + }) + return file_xds_core_v3_collection_entry_proto_rawDescData +} + +var file_xds_core_v3_collection_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{ + (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry + (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry + (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_xds_core_v3_collection_entry_proto_depIdxs = []int32{ + 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator + 1, // 1: xds.core.v3.CollectionEntry.inline_entry:type_name -> xds.core.v3.CollectionEntry.InlineEntry + 3, // 2: xds.core.v3.CollectionEntry.InlineEntry.resource:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_collection_entry_proto_init() } +func file_xds_core_v3_collection_entry_proto_init() { + if File_xds_core_v3_collection_entry_proto != nil { + return + } + file_xds_core_v3_resource_locator_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_collection_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_core_v3_collection_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionEntry_InlineEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_core_v3_collection_entry_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CollectionEntry_Locator)(nil), + (*CollectionEntry_InlineEntry_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_collection_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_xds_core_v3_collection_entry_proto_goTypes, + DependencyIndexes: file_xds_core_v3_collection_entry_proto_depIdxs, + MessageInfos: file_xds_core_v3_collection_entry_proto_msgTypes, + }.Build() + File_xds_core_v3_collection_entry_proto = out.File + file_xds_core_v3_collection_entry_proto_rawDesc = nil + file_xds_core_v3_collection_entry_proto_goTypes = nil + file_xds_core_v3_collection_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go new file mode 100644 index 000000000..610990b7f --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go @@ -0,0 +1,383 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/collection_entry.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CollectionEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CollectionEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntryMultiError, or nil if none found. 
+func (m *CollectionEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofResourceSpecifierPresent := false + switch v := m.ResourceSpecifier.(type) { + case *CollectionEntry_Locator: + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocator()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CollectionEntry_InlineEntry_: + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetInlineEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofResourceSpecifierPresent { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CollectionEntryMultiError(errors) + } + + return nil +} + +// CollectionEntryMultiError is an error wrapping multiple validation errors +// returned by CollectionEntry.ValidateAll() if the designated constraints +// aren't met. +type CollectionEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntryMultiError) AllErrors() []error { return m } + +// CollectionEntryValidationError is the validation error returned by +// CollectionEntry.Validate if the designated constraints aren't met. 
+type CollectionEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CollectionEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CollectionEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CollectionEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CollectionEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CollectionEntryValidationError) ErrorName() string { return "CollectionEntryValidationError" } + +// Error satisfies the builtin error interface +func (e CollectionEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCollectionEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CollectionEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CollectionEntryValidationError{} + +// Validate checks the field values on CollectionEntry_InlineEntry with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CollectionEntry_InlineEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry_InlineEntry with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntry_InlineEntryMultiError, or nil if none found. +func (m *CollectionEntry_InlineEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry_InlineEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) { + err := CollectionEntry_InlineEntryValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CollectionEntry_InlineEntryMultiError(errors) + } + + return nil +} + +// CollectionEntry_InlineEntryMultiError is an error wrapping multiple +// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if +// the designated constraints aren't met. 
+type CollectionEntry_InlineEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntry_InlineEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m } + +// CollectionEntry_InlineEntryValidationError is the validation error returned +// by CollectionEntry_InlineEntry.Validate if the designated constraints +// aren't met. +type CollectionEntry_InlineEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CollectionEntry_InlineEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CollectionEntry_InlineEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CollectionEntry_InlineEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CollectionEntry_InlineEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CollectionEntry_InlineEntryValidationError) ErrorName() string { + return "CollectionEntry_InlineEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e CollectionEntry_InlineEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCollectionEntry_InlineEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CollectionEntry_InlineEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CollectionEntry_InlineEntryValidationError{} + +var _CollectionEntry_InlineEntry_Name_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\.~:]+$") diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go new file mode 100644 index 000000000..714ab4367 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/context_params.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ContextParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ContextParams) Reset() { + *x = ContextParams{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_context_params_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContextParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContextParams) ProtoMessage() {} + +func (x *ContextParams) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_context_params_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContextParams.ProtoReflect.Descriptor instead. +func (*ContextParams) Descriptor() ([]byte, []int) { + return file_xds_core_v3_context_params_proto_rawDescGZIP(), []int{0} +} + +func (x *ContextParams) GetParams() map[string]string { + if x != nil { + return x.Params + } + return nil +} + +var File_xds_core_v3_context_params_proto protoreflect.FileDescriptor + +var file_xds_core_v3_context_params_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x8a, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3e, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + 
file_xds_core_v3_context_params_proto_rawDescOnce sync.Once + file_xds_core_v3_context_params_proto_rawDescData = file_xds_core_v3_context_params_proto_rawDesc +) + +func file_xds_core_v3_context_params_proto_rawDescGZIP() []byte { + file_xds_core_v3_context_params_proto_rawDescOnce.Do(func() { + file_xds_core_v3_context_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_context_params_proto_rawDescData) + }) + return file_xds_core_v3_context_params_proto_rawDescData +} + +var file_xds_core_v3_context_params_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_context_params_proto_goTypes = []interface{}{ + (*ContextParams)(nil), // 0: xds.core.v3.ContextParams + nil, // 1: xds.core.v3.ContextParams.ParamsEntry +} +var file_xds_core_v3_context_params_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.ContextParams.params:type_name -> xds.core.v3.ContextParams.ParamsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_context_params_proto_init() } +func file_xds_core_v3_context_params_proto_init() { + if File_xds_core_v3_context_params_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_context_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContextParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_context_params_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_context_params_proto_goTypes, + DependencyIndexes: file_xds_core_v3_context_params_proto_depIdxs, + MessageInfos: file_xds_core_v3_context_params_proto_msgTypes, + }.Build() + File_xds_core_v3_context_params_proto = out.File + file_xds_core_v3_context_params_proto_rawDesc = nil + file_xds_core_v3_context_params_proto_goTypes = nil + file_xds_core_v3_context_params_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go new file mode 100644 index 000000000..1c9accaa3 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/context_params.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ContextParams with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ContextParams) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ContextParams with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ContextParamsMultiError, or +// nil if none found. +func (m *ContextParams) ValidateAll() error { + return m.validate(true) +} + +func (m *ContextParams) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Params + + if len(errors) > 0 { + return ContextParamsMultiError(errors) + } + + return nil +} + +// ContextParamsMultiError is an error wrapping multiple validation errors +// returned by ContextParams.ValidateAll() if the designated constraints +// aren't met. +type ContextParamsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ContextParamsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ContextParamsMultiError) AllErrors() []error { return m } + +// ContextParamsValidationError is the validation error returned by +// ContextParams.Validate if the designated constraints aren't met. +type ContextParamsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ContextParamsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ContextParamsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ContextParamsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ContextParamsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ContextParamsValidationError) ErrorName() string { return "ContextParamsValidationError" } + +// Error satisfies the builtin error interface +func (e ContextParamsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sContextParams.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ContextParamsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ContextParamsValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go new file mode 100644 index 000000000..be4ea10c6 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/extension.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedExtensionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *TypedExtensionConfig) Reset() { + *x = TypedExtensionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedExtensionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedExtensionConfig) ProtoMessage() {} + +func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead. +func (*TypedExtensionConfig) Descriptor() ([]byte, []int) { + return file_xds_core_v3_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedExtensionConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +var File_xds_core_v3_extension_proto protoreflect.FileDescriptor + +var file_xds_core_v3_extension_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76, + 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x4e, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var 
( + file_xds_core_v3_extension_proto_rawDescOnce sync.Once + file_xds_core_v3_extension_proto_rawDescData = file_xds_core_v3_extension_proto_rawDesc +) + +func file_xds_core_v3_extension_proto_rawDescGZIP() []byte { + file_xds_core_v3_extension_proto_rawDescOnce.Do(func() { + file_xds_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_extension_proto_rawDescData) + }) + return file_xds_core_v3_extension_proto_rawDescData +} + +var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_extension_proto_goTypes = []interface{}{ + (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_xds_core_v3_extension_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_extension_proto_init() } +func file_xds_core_v3_extension_proto_init() { + if File_xds_core_v3_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedExtensionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_extension_proto_goTypes, + DependencyIndexes: file_xds_core_v3_extension_proto_depIdxs, + MessageInfos: file_xds_core_v3_extension_proto_msgTypes, + }.Build() + File_xds_core_v3_extension_proto = out.File + file_xds_core_v3_extension_proto_rawDesc = nil + file_xds_core_v3_extension_proto_goTypes = nil + file_xds_core_v3_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go new file mode 100644 index 000000000..839f3fef7 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/extension.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *TypedExtensionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TypedExtensionConfigMultiError, or nil if none found. +func (m *TypedExtensionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedExtensionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TypedExtensionConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTypedConfig() == nil { + err := TypedExtensionConfigValidationError{ + field: "TypedConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetTypedConfig(); a != nil { + + } + + if len(errors) > 0 { + return TypedExtensionConfigMultiError(errors) + } + + return nil +} + +// TypedExtensionConfigMultiError is an error wrapping multiple validation +// errors returned by TypedExtensionConfig.ValidateAll() if the designated +// constraints aren't met. +type TypedExtensionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedExtensionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedExtensionConfigMultiError) AllErrors() []error { return m } + +// TypedExtensionConfigValidationError is the validation error returned by +// TypedExtensionConfig.Validate if the designated constraints aren't met. +type TypedExtensionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedExtensionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedExtensionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedExtensionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedExtensionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedExtensionConfigValidationError) ErrorName() string { + return "TypedExtensionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TypedExtensionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedExtensionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedExtensionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedExtensionConfigValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go new file mode 100644 index 000000000..641e3411a --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetName() *ResourceName { + if x != nil { + return x.Name + } + return nil +} + +func (x *Resource) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Resource) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +var File_xds_core_v3_resource_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_proto_rawDescData = file_xds_core_v3_resource_proto_rawDesc +) + +func file_xds_core_v3_resource_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_proto_rawDescData) + }) + return file_xds_core_v3_resource_proto_rawDescData +} + +var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: xds.core.v3.Resource + (*ResourceName)(nil), // 1: xds.core.v3.ResourceName + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var 
file_xds_core_v3_resource_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName + 2, // 1: xds.core.v3.Resource.resource:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_proto_init() } +func file_xds_core_v3_resource_proto_init() { + if File_xds_core_v3_resource_proto != nil { + return + } + file_xds_core_v3_resource_name_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_proto_depIdxs, + MessageInfos: file_xds_core_v3_resource_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_proto = out.File + file_xds_core_v3_resource_proto_rawDesc = nil + file_xds_core_v3_resource_proto_goTypes = nil + file_xds_core_v3_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go new file mode 100644 index 000000000..dc972171c --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go @@ -0,0 +1,195 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/resource.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. 
+func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go new file mode 100644 index 000000000..3f99d4bee --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource_locator.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceLocator_Scheme int32 + +const ( + ResourceLocator_XDSTP ResourceLocator_Scheme = 0 + ResourceLocator_HTTP ResourceLocator_Scheme = 1 + ResourceLocator_FILE ResourceLocator_Scheme = 2 +) + +// Enum value maps for ResourceLocator_Scheme. +var ( + ResourceLocator_Scheme_name = map[int32]string{ + 0: "XDSTP", + 1: "HTTP", + 2: "FILE", + } + ResourceLocator_Scheme_value = map[string]int32{ + "XDSTP": 0, + "HTTP": 1, + "FILE": 2, + } +) + +func (x ResourceLocator_Scheme) Enum() *ResourceLocator_Scheme { + p := new(ResourceLocator_Scheme) + *p = x + return p +} + +func (x ResourceLocator_Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceLocator_Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_xds_core_v3_resource_locator_proto_enumTypes[0].Descriptor() +} + +func (ResourceLocator_Scheme) Type() protoreflect.EnumType { + return &file_xds_core_v3_resource_locator_proto_enumTypes[0] +} + +func (x ResourceLocator_Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceLocator_Scheme.Descriptor instead. 
+func (ResourceLocator_Scheme) EnumDescriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0} +} + +type ResourceLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Scheme ResourceLocator_Scheme `protobuf:"varint,1,opt,name=scheme,proto3,enum=xds.core.v3.ResourceLocator_Scheme" json:"scheme,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // Types that are assignable to ContextParamSpecifier: + // + // *ResourceLocator_ExactContext + ContextParamSpecifier isResourceLocator_ContextParamSpecifier `protobuf_oneof:"context_param_specifier"` + Directives []*ResourceLocator_Directive `protobuf:"bytes,6,rep,name=directives,proto3" json:"directives,omitempty"` +} + +func (x *ResourceLocator) Reset() { + *x = ResourceLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator) ProtoMessage() {} + +func (x *ResourceLocator) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead. 
+func (*ResourceLocator) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceLocator) GetScheme() ResourceLocator_Scheme { + if x != nil { + return x.Scheme + } + return ResourceLocator_XDSTP +} + +func (x *ResourceLocator) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ResourceLocator) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ResourceLocator) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (m *ResourceLocator) GetContextParamSpecifier() isResourceLocator_ContextParamSpecifier { + if m != nil { + return m.ContextParamSpecifier + } + return nil +} + +func (x *ResourceLocator) GetExactContext() *ContextParams { + if x, ok := x.GetContextParamSpecifier().(*ResourceLocator_ExactContext); ok { + return x.ExactContext + } + return nil +} + +func (x *ResourceLocator) GetDirectives() []*ResourceLocator_Directive { + if x != nil { + return x.Directives + } + return nil +} + +type isResourceLocator_ContextParamSpecifier interface { + isResourceLocator_ContextParamSpecifier() +} + +type ResourceLocator_ExactContext struct { + ExactContext *ContextParams `protobuf:"bytes,5,opt,name=exact_context,json=exactContext,proto3,oneof"` +} + +func (*ResourceLocator_ExactContext) isResourceLocator_ContextParamSpecifier() {} + +type ResourceLocator_Directive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Directive: + // + // *ResourceLocator_Directive_Alt + // *ResourceLocator_Directive_Entry + Directive isResourceLocator_Directive_Directive `protobuf_oneof:"directive"` +} + +func (x *ResourceLocator_Directive) Reset() { + *x = ResourceLocator_Directive{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator_Directive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator_Directive) ProtoMessage() {} + +func (x *ResourceLocator_Directive) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator_Directive.ProtoReflect.Descriptor instead. 
+func (*ResourceLocator_Directive) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *ResourceLocator_Directive) GetDirective() isResourceLocator_Directive_Directive { + if m != nil { + return m.Directive + } + return nil +} + +func (x *ResourceLocator_Directive) GetAlt() *ResourceLocator { + if x, ok := x.GetDirective().(*ResourceLocator_Directive_Alt); ok { + return x.Alt + } + return nil +} + +func (x *ResourceLocator_Directive) GetEntry() string { + if x, ok := x.GetDirective().(*ResourceLocator_Directive_Entry); ok { + return x.Entry + } + return "" +} + +type isResourceLocator_Directive_Directive interface { + isResourceLocator_Directive_Directive() +} + +type ResourceLocator_Directive_Alt struct { + Alt *ResourceLocator `protobuf:"bytes,1,opt,name=alt,proto3,oneof"` +} + +type ResourceLocator_Directive_Entry struct { + Entry string `protobuf:"bytes,2,opt,name=entry,proto3,oneof"` +} + +func (*ResourceLocator_Directive_Alt) isResourceLocator_Directive_Directive() {} + +func (*ResourceLocator_Directive_Entry) isResourceLocator_Directive_Directive() {} + +var File_xds_core_v3_resource_locator_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x04, + 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x12, 0x45, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x23, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x52, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x1a, + 0x88, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x30, 0x0a, + 0x03, 0x61, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6c, 0x74, 0x12, + 0x37, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1f, + 0xfa, 0x42, 0x1c, 0x72, 0x1a, 0x10, 0x01, 0x32, 0x16, 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, + 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x2f, 0x7e, 0x3a, 0x5d, 0x2b, 0x24, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x27, 0x0a, 0x06, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x58, 0x44, 0x53, 0x54, 0x50, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c, + 0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_locator_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_locator_proto_rawDescData = file_xds_core_v3_resource_locator_proto_rawDesc +) + +func file_xds_core_v3_resource_locator_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_locator_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_locator_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_locator_proto_rawDescData) + }) + return file_xds_core_v3_resource_locator_proto_rawDescData +} + +var file_xds_core_v3_resource_locator_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_xds_core_v3_resource_locator_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_resource_locator_proto_goTypes = []interface{}{ + (ResourceLocator_Scheme)(0), // 0: xds.core.v3.ResourceLocator.Scheme + (*ResourceLocator)(nil), // 1: xds.core.v3.ResourceLocator + (*ResourceLocator_Directive)(nil), // 2: xds.core.v3.ResourceLocator.Directive + (*ContextParams)(nil), // 3: xds.core.v3.ContextParams +} +var 
file_xds_core_v3_resource_locator_proto_depIdxs = []int32{ + 0, // 0: xds.core.v3.ResourceLocator.scheme:type_name -> xds.core.v3.ResourceLocator.Scheme + 3, // 1: xds.core.v3.ResourceLocator.exact_context:type_name -> xds.core.v3.ContextParams + 2, // 2: xds.core.v3.ResourceLocator.directives:type_name -> xds.core.v3.ResourceLocator.Directive + 1, // 3: xds.core.v3.ResourceLocator.Directive.alt:type_name -> xds.core.v3.ResourceLocator + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_locator_proto_init() } +func file_xds_core_v3_resource_locator_proto_init() { + if File_xds_core_v3_resource_locator_proto != nil { + return + } + file_xds_core_v3_context_params_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_locator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_core_v3_resource_locator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator_Directive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_core_v3_resource_locator_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ResourceLocator_ExactContext)(nil), + } + file_xds_core_v3_resource_locator_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ResourceLocator_Directive_Alt)(nil), + (*ResourceLocator_Directive_Entry)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_locator_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_locator_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_locator_proto_depIdxs, + EnumInfos: file_xds_core_v3_resource_locator_proto_enumTypes, + MessageInfos: file_xds_core_v3_resource_locator_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_locator_proto = out.File + file_xds_core_v3_resource_locator_proto_rawDesc = nil + file_xds_core_v3_resource_locator_proto_goTypes = nil + file_xds_core_v3_resource_locator_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go new file mode 100644 index 000000000..1686e98d1 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go @@ -0,0 +1,439 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/core/v3/resource_locator.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceLocator with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocatorMultiError, or nil if none found. +func (m *ResourceLocator) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok { + err := ResourceLocatorValidationError{ + field: "Scheme", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Id + + // no validation rules for Authority + + if utf8.RuneCountInString(m.GetResourceType()) < 1 { + err := ResourceLocatorValidationError{ + field: "ResourceType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDirectives() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + switch v := m.ContextParamSpecifier.(type) { + case *ResourceLocator_ExactContext: + if v == nil { + err := ResourceLocatorValidationError{ + field: "ContextParamSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExactContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: 
"ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ResourceLocatorMultiError(errors) + } + + return nil +} + +// ResourceLocatorMultiError is an error wrapping multiple validation errors +// returned by ResourceLocator.ValidateAll() if the designated constraints +// aren't met. +type ResourceLocatorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocatorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocatorMultiError) AllErrors() []error { return m } + +// ResourceLocatorValidationError is the validation error returned by +// ResourceLocator.Validate if the designated constraints aren't met. +type ResourceLocatorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceLocatorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocatorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocatorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocatorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceLocatorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocatorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocatorValidationError{} + +// Validate checks the field values on ResourceLocator_Directive with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator_Directive) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator_Directive with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocator_DirectiveMultiError, or nil if none found. 
+func (m *ResourceLocator_Directive) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator_Directive) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofDirectivePresent := false + switch v := m.Directive.(type) { + case *ResourceLocator_Directive_Alt: + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true + + if all { + switch v := interface{}(m.GetAlt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ResourceLocator_Directive_Entry: + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true + + if utf8.RuneCountInString(m.GetEntry()) < 1 { + err := ResourceLocator_DirectiveValidationError{ + field: "Entry", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) { + err := ResourceLocator_DirectiveValidationError{ + field: "Entry", + reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofDirectivePresent { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ResourceLocator_DirectiveMultiError(errors) + } + + return nil +} + +// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation +// errors returned by ResourceLocator_Directive.ValidateAll() if the +// designated constraints aren't met. +type ResourceLocator_DirectiveMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocator_DirectiveMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m } + +// ResourceLocator_DirectiveValidationError is the validation error returned by +// ResourceLocator_Directive.Validate if the designated constraints aren't met. +type ResourceLocator_DirectiveValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResourceLocator_DirectiveValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocator_DirectiveValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocator_DirectiveValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocator_DirectiveValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocator_DirectiveValidationError) ErrorName() string { + return "ResourceLocator_DirectiveValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceLocator_DirectiveValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator_Directive.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocator_DirectiveValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocator_DirectiveValidationError{} + +var _ResourceLocator_Directive_Entry_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\./~:]+$") diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go new file mode 100644 index 000000000..3d42818b7 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource_name.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + ResourceType string `protobuf:"bytes,3,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + Context *ContextParams `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ResourceName) Reset() { + *x = ResourceName{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_name_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceName) ProtoMessage() {} + +func (x *ResourceName) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_name_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead. +func (*ResourceName) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_name_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceName) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ResourceName) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ResourceName) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceName) GetContext() *ContextParams { + if x != nil { + return x.Context + } + return nil +} + +var File_xds_core_v3_resource_name_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_name_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 
0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_name_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_name_proto_rawDescData = file_xds_core_v3_resource_name_proto_rawDesc +) + +func file_xds_core_v3_resource_name_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_name_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_name_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_name_proto_rawDescData) + }) + return file_xds_core_v3_resource_name_proto_rawDescData +} + +var file_xds_core_v3_resource_name_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_resource_name_proto_goTypes = []interface{}{ + (*ResourceName)(nil), // 0: xds.core.v3.ResourceName + (*ContextParams)(nil), // 1: xds.core.v3.ContextParams +} +var file_xds_core_v3_resource_name_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.ResourceName.context:type_name -> xds.core.v3.ContextParams + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_name_proto_init() } +func file_xds_core_v3_resource_name_proto_init() { + if File_xds_core_v3_resource_name_proto != nil { + return + } + file_xds_core_v3_context_params_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_name_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_name_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_name_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_name_proto_depIdxs, + MessageInfos: file_xds_core_v3_resource_name_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_name_proto = out.File + file_xds_core_v3_resource_name_proto_rawDesc = nil + file_xds_core_v3_resource_name_proto_goTypes = nil + file_xds_core_v3_resource_name_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go new file mode 100644 index 000000000..270e921bc --- /dev/null +++ 
b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/resource_name.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceName with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceNameMultiError, or +// nil if none found. +func (m *ResourceName) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for Authority + + if utf8.RuneCountInString(m.GetResourceType()) < 1 { + err := ResourceNameValidationError{ + field: "ResourceType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceNameMultiError(errors) + } + + return nil +} + +// ResourceNameMultiError is an error wrapping multiple validation errors +// returned by ResourceName.ValidateAll() if the designated constraints aren't met. +type ResourceNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceNameMultiError) AllErrors() []error { return m } + +// ResourceNameValidationError is the validation error returned by +// ResourceName.Validate if the designated constraints aren't met. +type ResourceNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResourceNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceNameValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go new file mode 100644 index 000000000..74899339b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go @@ -0,0 +1,272 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/data/orca/v3/orca_load_report.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OrcaLoadReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` + // Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. 
+ Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"` + RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"` + Eps float64 `protobuf:"fixed64,7,opt,name=eps,proto3" json:"eps,omitempty"` + NamedMetrics map[string]float64 `protobuf:"bytes,8,rep,name=named_metrics,json=namedMetrics,proto3" json:"named_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + ApplicationUtilization float64 `protobuf:"fixed64,9,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` +} + +func (x *OrcaLoadReport) Reset() { + *x = OrcaLoadReport{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrcaLoadReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrcaLoadReport) ProtoMessage() {} + +func (x *OrcaLoadReport) ProtoReflect() protoreflect.Message { + mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrcaLoadReport.ProtoReflect.Descriptor instead. +func (*OrcaLoadReport) Descriptor() ([]byte, []int) { + return file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP(), []int{0} +} + +func (x *OrcaLoadReport) GetCpuUtilization() float64 { + if x != nil { + return x.CpuUtilization + } + return 0 +} + +func (x *OrcaLoadReport) GetMemUtilization() float64 { + if x != nil { + return x.MemUtilization + } + return 0 +} + +// Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. 
+func (x *OrcaLoadReport) GetRps() uint64 { + if x != nil { + return x.Rps + } + return 0 +} + +func (x *OrcaLoadReport) GetRequestCost() map[string]float64 { + if x != nil { + return x.RequestCost + } + return nil +} + +func (x *OrcaLoadReport) GetUtilization() map[string]float64 { + if x != nil { + return x.Utilization + } + return nil +} + +func (x *OrcaLoadReport) GetRpsFractional() float64 { + if x != nil { + return x.RpsFractional + } + return 0 +} + +func (x *OrcaLoadReport) GetEps() float64 { + if x != nil { + return x.Eps + } + return 0 +} + +func (x *OrcaLoadReport) GetNamedMetrics() map[string]float64 { + if x != nil { + return x.NamedMetrics + } + return nil +} + +func (x *OrcaLoadReport) GetApplicationUtilization() float64 { + if x != nil { + return x.ApplicationUtilization + } + return 0 +} + +var File_xds_data_orca_v3_orca_load_report_proto protoreflect.FileDescriptor + +var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, + 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, + 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x71, + 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, + 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x9a, 0x01, 0x16, 0x2a, 0x14, + 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, + 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, + 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, + 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, + 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce sync.Once + file_xds_data_orca_v3_orca_load_report_proto_rawDescData = file_xds_data_orca_v3_orca_load_report_proto_rawDesc +) + +func file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP() []byte { + file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce.Do(func() { + file_xds_data_orca_v3_orca_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_data_orca_v3_orca_load_report_proto_rawDescData) + }) + return file_xds_data_orca_v3_orca_load_report_proto_rawDescData +} + +var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_xds_data_orca_v3_orca_load_report_proto_goTypes = []interface{}{ + (*OrcaLoadReport)(nil), // 0: xds.data.orca.v3.OrcaLoadReport + nil, // 1: xds.data.orca.v3.OrcaLoadReport.RequestCostEntry + nil, // 2: xds.data.orca.v3.OrcaLoadReport.UtilizationEntry + nil, // 3: xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry +} +var file_xds_data_orca_v3_orca_load_report_proto_depIdxs = []int32{ + 1, // 0: xds.data.orca.v3.OrcaLoadReport.request_cost:type_name -> xds.data.orca.v3.OrcaLoadReport.RequestCostEntry + 2, // 1: xds.data.orca.v3.OrcaLoadReport.utilization:type_name -> xds.data.orca.v3.OrcaLoadReport.UtilizationEntry + 3, // 2: xds.data.orca.v3.OrcaLoadReport.named_metrics:type_name -> xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_data_orca_v3_orca_load_report_proto_init() } +func file_xds_data_orca_v3_orca_load_report_proto_init() { + if File_xds_data_orca_v3_orca_load_report_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrcaLoadReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_data_orca_v3_orca_load_report_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_data_orca_v3_orca_load_report_proto_goTypes, + DependencyIndexes: file_xds_data_orca_v3_orca_load_report_proto_depIdxs, + MessageInfos: file_xds_data_orca_v3_orca_load_report_proto_msgTypes, + }.Build() + File_xds_data_orca_v3_orca_load_report_proto = out.File + file_xds_data_orca_v3_orca_load_report_proto_rawDesc = nil + file_xds_data_orca_v3_orca_load_report_proto_goTypes = nil + file_xds_data_orca_v3_orca_load_report_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go new file mode 100644 index 000000000..8dd55330a --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go @@ -0,0 +1,225 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/data/orca/v3/orca_load_report.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OrcaLoadReport with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrcaLoadReport) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReport with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrcaLoadReportMultiError, +// or nil if none found. +func (m *OrcaLoadReport) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReport) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetCpuUtilization() < 0 { + err := OrcaLoadReportValidationError{ + field: "CpuUtilization", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if val := m.GetMemUtilization(); val < 0 || val > 1 { + err := OrcaLoadReportValidationError{ + field: "MemUtilization", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Rps + + // no validation rules for RequestCost + + { + sorted_keys := make([]string, len(m.GetUtilization())) + i := 0 + for key := range m.GetUtilization() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetUtilization()[key] + _ = val + + // no validation rules for Utilization[key] + + if val := val; val < 0 || val > 1 { + err := OrcaLoadReportValidationError{ + field: fmt.Sprintf("Utilization[%v]", key), + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetRpsFractional() < 0 { + err := OrcaLoadReportValidationError{ + field: "RpsFractional", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetEps() < 0 { + err := OrcaLoadReportValidationError{ + field: "Eps", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for NamedMetrics + + if m.GetApplicationUtilization() < 0 { + err := OrcaLoadReportValidationError{ + field: "ApplicationUtilization", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return OrcaLoadReportMultiError(errors) + } + + return nil +} + +// OrcaLoadReportMultiError is an error wrapping multiple validation errors +// returned by OrcaLoadReport.ValidateAll() if the designated constraints +// aren't met. +type OrcaLoadReportMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OrcaLoadReportMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportMultiError) AllErrors() []error { return m } + +// OrcaLoadReportValidationError is the validation error returned by +// OrcaLoadReport.Validate if the designated constraints aren't met. +type OrcaLoadReportValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrcaLoadReportValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OrcaLoadReportValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrcaLoadReportValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrcaLoadReportValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrcaLoadReportValidationError) ErrorName() string { return "OrcaLoadReportValidationError" } + +// Error satisfies the builtin error interface +func (e OrcaLoadReportValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrcaLoadReport.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrcaLoadReportValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrcaLoadReportValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go new file mode 100644 index 000000000..463f4ed33 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OrcaLoadReportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` + RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` +} + +func (x *OrcaLoadReportRequest) Reset() { + *x = OrcaLoadReportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrcaLoadReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrcaLoadReportRequest) ProtoMessage() {} + +func (x *OrcaLoadReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrcaLoadReportRequest.ProtoReflect.Descriptor instead. +func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) { + return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0} +} + +func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration { + if x != nil { + return x.ReportInterval + } + return nil +} + +func (x *OrcaLoadReportRequest) GetRequestCostNames() []string { + if x != nil { + return x.RequestCostNames + } + return nil +} + +var File_xds_service_orca_v3_orca_proto protoreflect.FileDescriptor + +var file_xds_service_orca_v3_orca_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, + 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, + 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, + 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, + 0x01, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x75, 0x0a, 0x0e, 0x4f, 0x70, + 0x65, 0x6e, 0x52, 0x63, 0x61, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x11, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x72, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x2a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x30, + 0x01, 0x42, 0x59, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, 0x63, 0x61, + 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4f, 0x72, 0x63, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_service_orca_v3_orca_proto_rawDescOnce sync.Once + file_xds_service_orca_v3_orca_proto_rawDescData = file_xds_service_orca_v3_orca_proto_rawDesc +) + +func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte { + file_xds_service_orca_v3_orca_proto_rawDescOnce.Do(func() { + file_xds_service_orca_v3_orca_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_service_orca_v3_orca_proto_rawDescData) + }) + return file_xds_service_orca_v3_orca_proto_rawDescData +} + +var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{ + (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration + (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport +} +var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{ + 1, // 0: xds.service.orca.v3.OrcaLoadReportRequest.report_interval:type_name -> google.protobuf.Duration + 0, // 1: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:input_type -> xds.service.orca.v3.OrcaLoadReportRequest + 2, // 2: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:output_type -> xds.data.orca.v3.OrcaLoadReport + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_service_orca_v3_orca_proto_init() } +func file_xds_service_orca_v3_orca_proto_init() { + if File_xds_service_orca_v3_orca_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_service_orca_v3_orca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrcaLoadReportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_service_orca_v3_orca_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: 
file_xds_service_orca_v3_orca_proto_goTypes, + DependencyIndexes: file_xds_service_orca_v3_orca_proto_depIdxs, + MessageInfos: file_xds_service_orca_v3_orca_proto_msgTypes, + }.Build() + File_xds_service_orca_v3_orca_proto = out.File + file_xds_service_orca_v3_orca_proto_rawDesc = nil + file_xds_service_orca_v3_orca_proto_goTypes = nil + file_xds_service_orca_v3_orca_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go new file mode 100644 index 000000000..8949e8372 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *OrcaLoadReportRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OrcaLoadReportRequestMultiError, or nil if none found. +func (m *OrcaLoadReportRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReportRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReportInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OrcaLoadReportRequestMultiError(errors) + } + + return nil +} + +// OrcaLoadReportRequestMultiError is an error wrapping multiple validation +// errors returned by OrcaLoadReportRequest.ValidateAll() if the designated +// constraints aren't met. +type OrcaLoadReportRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OrcaLoadReportRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportRequestMultiError) AllErrors() []error { return m } + +// OrcaLoadReportRequestValidationError is the validation error returned by +// OrcaLoadReportRequest.Validate if the designated constraints aren't met. +type OrcaLoadReportRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrcaLoadReportRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OrcaLoadReportRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrcaLoadReportRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrcaLoadReportRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrcaLoadReportRequestValidationError) ErrorName() string { + return "OrcaLoadReportRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e OrcaLoadReportRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrcaLoadReportRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrcaLoadReportRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrcaLoadReportRequestValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go new file mode 100644 index 000000000..6cecac149 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.27.0--rc2 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + context "context" + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics" +) + +// OpenRcaServiceClient is the client API for OpenRcaService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type OpenRcaServiceClient interface { + StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) +} + +type openRcaServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { + return &openRcaServiceClient{cc} +} + +func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &openRcaServiceStreamCoreMetricsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenRcaService_StreamCoreMetricsClient interface { + Recv() (*v3.OrcaLoadReport, error) + grpc.ClientStream +} + +type openRcaServiceStreamCoreMetricsClient struct { + grpc.ClientStream +} + +func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { + m := new(v3.OrcaLoadReport) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OpenRcaServiceServer is the server API for OpenRcaService service. +// All implementations should embed UnimplementedOpenRcaServiceServer +// for forward compatibility +type OpenRcaServiceServer interface { + StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error +} + +// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations. +type UnimplementedOpenRcaServiceServer struct { +} + +func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") +} + +// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will +// result in compilation errors. +type UnsafeOpenRcaServiceServer interface { + mustEmbedUnimplementedOpenRcaServiceServer() +} + +func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) { + s.RegisterService(&OpenRcaService_ServiceDesc, srv) +} + +func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(OrcaLoadReportRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) +} + +type OpenRcaService_StreamCoreMetricsServer interface { + Send(*v3.OrcaLoadReport) error + grpc.ServerStream +} + +type openRcaServiceStreamCoreMetricsServer struct { + grpc.ServerStream +} + +func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { + return x.ServerStream.SendMsg(m) +} + +// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenRcaService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "xds.service.orca.v3.OpenRcaService", + HandlerType: (*OpenRcaServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCoreMetrics", + Handler: _OpenRcaService_StreamCoreMetrics_Handler, + ServerStreams: true, + }, + }, + Metadata: "xds/service/orca/v3/orca.proto", +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go new file mode 100644 index 000000000..7299227a3 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/cel.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CelMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExprMatch *v3.CelExpression `protobuf:"bytes,1,opt,name=expr_match,json=exprMatch,proto3" json:"expr_match,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *CelMatcher) Reset() { + *x = CelMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelMatcher) ProtoMessage() {} + +func (x *CelMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelMatcher.ProtoReflect.Descriptor instead. 
+func (*CelMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_cel_proto_rawDescGZIP(), []int{0} +} + +func (x *CelMatcher) GetExprMatch() *v3.CelExpression { + if x != nil { + return x.ExprMatch + } + return nil +} + +func (x *CelMatcher) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_xds_type_matcher_v3_cel_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, + 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_cel_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_cel_proto_rawDescData = file_xds_type_matcher_v3_cel_proto_rawDesc +) + +func file_xds_type_matcher_v3_cel_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_cel_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_cel_proto_rawDescData) + }) + return file_xds_type_matcher_v3_cel_proto_rawDescData +} + +var file_xds_type_matcher_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_matcher_v3_cel_proto_goTypes = []interface{}{ + (*CelMatcher)(nil), // 0: xds.type.matcher.v3.CelMatcher + (*v3.CelExpression)(nil), // 1: xds.type.v3.CelExpression +} +var file_xds_type_matcher_v3_cel_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.CelMatcher.expr_match:type_name -> xds.type.v3.CelExpression + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list 
for field type_name +} + +func init() { file_xds_type_matcher_v3_cel_proto_init() } +func file_xds_type_matcher_v3_cel_proto_init() { + if File_xds_type_matcher_v3_cel_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_cel_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_cel_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_cel_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_cel_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_cel_proto = out.File + file_xds_type_matcher_v3_cel_proto_rawDesc = nil + file_xds_type_matcher_v3_cel_proto_goTypes = nil + file_xds_type_matcher_v3_cel_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go new file mode 100644 index 000000000..091267b0c --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/cel.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CelMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CelMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CelMatcherMultiError, or +// nil if none found. 
+func (m *CelMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *CelMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExprMatch() == nil { + err := CelMatcherValidationError{ + field: "ExprMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExprMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExprMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Description + + if len(errors) > 0 { + return CelMatcherMultiError(errors) + } + + return nil +} + +// CelMatcherMultiError is an error wrapping multiple validation errors +// returned by CelMatcher.ValidateAll() if the designated constraints aren't met. +type CelMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelMatcherMultiError) AllErrors() []error { return m } + +// CelMatcherValidationError is the validation error returned by +// CelMatcher.Validate if the designated constraints aren't met. +type CelMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CelMatcherValidationError) ErrorName() string { return "CelMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e CelMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go new file mode 100644 index 000000000..5f72c8d11 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/domain.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerNameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DomainMatchers []*ServerNameMatcher_DomainMatcher `protobuf:"bytes,1,rep,name=domain_matchers,json=domainMatchers,proto3" json:"domain_matchers,omitempty"` +} + +func (x *ServerNameMatcher) Reset() { + *x = ServerNameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerNameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerNameMatcher) ProtoMessage() {} + +func (x *ServerNameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerNameMatcher.ProtoReflect.Descriptor instead. +func (*ServerNameMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerNameMatcher) GetDomainMatchers() []*ServerNameMatcher_DomainMatcher { + if x != nil { + return x.DomainMatchers + } + return nil +} + +type ServerNameMatcher_DomainMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *ServerNameMatcher_DomainMatcher) Reset() { + *x = ServerNameMatcher_DomainMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerNameMatcher_DomainMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerNameMatcher_DomainMatcher) ProtoMessage() {} + +func (x *ServerNameMatcher_DomainMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerNameMatcher_DomainMatcher.ProtoReflect.Descriptor instead. 
+func (*ServerNameMatcher_DomainMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ServerNameMatcher_DomainMatcher) GetDomains() []string { + if x != nil { + return x.Domains + } + return nil +} + +func (x *ServerNameMatcher_DomainMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +var File_xds_type_matcher_v3_domain_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_domain_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0f, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x74, 0x0a, 0x0d, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3f, + 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x6e, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 
0x2f, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_domain_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_domain_proto_rawDescData = file_xds_type_matcher_v3_domain_proto_rawDesc +) + +func file_xds_type_matcher_v3_domain_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_domain_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_domain_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_domain_proto_rawDescData) + }) + return file_xds_type_matcher_v3_domain_proto_rawDescData +} + +var file_xds_type_matcher_v3_domain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_domain_proto_goTypes = []interface{}{ + (*ServerNameMatcher)(nil), // 0: xds.type.matcher.v3.ServerNameMatcher + (*ServerNameMatcher_DomainMatcher)(nil), // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher + (*Matcher_OnMatch)(nil), // 2: xds.type.matcher.v3.Matcher.OnMatch +} +var file_xds_type_matcher_v3_domain_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.ServerNameMatcher.domain_matchers:type_name -> xds.type.matcher.v3.ServerNameMatcher.DomainMatcher + 2, // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_domain_proto_init() } +func file_xds_type_matcher_v3_domain_proto_init() { + if File_xds_type_matcher_v3_domain_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_domain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerNameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_domain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerNameMatcher_DomainMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_domain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_domain_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_domain_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_domain_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_domain_proto = out.File + file_xds_type_matcher_v3_domain_proto_rawDesc = nil + file_xds_type_matcher_v3_domain_proto_goTypes = nil + file_xds_type_matcher_v3_domain_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go new file mode 100644 index 000000000..e95bdfa28 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go @@ -0,0 +1,315 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/type/matcher/v3/domain.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ServerNameMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ServerNameMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ServerNameMatcherMultiError, or nil if none found. +func (m *ServerNameMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerNameMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDomainMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ServerNameMatcherMultiError(errors) + } + + return nil +} + +// ServerNameMatcherMultiError is an error wrapping multiple validation errors +// returned by ServerNameMatcher.ValidateAll() if the designated constraints +// aren't met. +type ServerNameMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerNameMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ServerNameMatcherMultiError) AllErrors() []error { return m } + +// ServerNameMatcherValidationError is the validation error returned by +// ServerNameMatcher.Validate if the designated constraints aren't met. +type ServerNameMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerNameMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerNameMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ServerNameMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerNameMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerNameMatcherValidationError) ErrorName() string { + return "ServerNameMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ServerNameMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerNameMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerNameMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerNameMatcherValidationError{} + +// Validate checks the field values on ServerNameMatcher_DomainMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ServerNameMatcher_DomainMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerNameMatcher_DomainMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ServerNameMatcher_DomainMatcherMultiError, or nil if none found. +func (m *ServerNameMatcher_DomainMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerNameMatcher_DomainMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetDomains()) < 1 { + err := ServerNameMatcher_DomainMatcherValidationError{ + field: "Domains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ServerNameMatcher_DomainMatcherMultiError(errors) + } + + return nil +} + +// ServerNameMatcher_DomainMatcherMultiError is an error wrapping multiple +// validation errors returned by ServerNameMatcher_DomainMatcher.ValidateAll() +// if the designated constraints aren't met. +type ServerNameMatcher_DomainMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerNameMatcher_DomainMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ServerNameMatcher_DomainMatcherMultiError) AllErrors() []error { return m } + +// ServerNameMatcher_DomainMatcherValidationError is the validation error +// returned by ServerNameMatcher_DomainMatcher.Validate if the designated +// constraints aren't met. +type ServerNameMatcher_DomainMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerNameMatcher_DomainMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerNameMatcher_DomainMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ServerNameMatcher_DomainMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerNameMatcher_DomainMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerNameMatcher_DomainMatcherValidationError) ErrorName() string { + return "ServerNameMatcher_DomainMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ServerNameMatcher_DomainMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerNameMatcher_DomainMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerNameMatcher_DomainMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerNameMatcher_DomainMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go new file mode 100644 index 000000000..4393bb7e2 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/http_inputs.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HttpAttributesCelMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpAttributesCelMatchInput) Reset() { + *x = HttpAttributesCelMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpAttributesCelMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpAttributesCelMatchInput) ProtoMessage() {} + +func (x *HttpAttributesCelMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpAttributesCelMatchInput.ProtoReflect.Descriptor instead. +func (*HttpAttributesCelMatchInput) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0} +} + +var File_xds_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b, + 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65, + 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_http_inputs_proto_rawDescData = file_xds_type_matcher_v3_http_inputs_proto_rawDesc +) + +func file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_http_inputs_proto_rawDescData) + }) + return file_xds_type_matcher_v3_http_inputs_proto_rawDescData +} + +var file_xds_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{ + (*HttpAttributesCelMatchInput)(nil), // 0: xds.type.matcher.v3.HttpAttributesCelMatchInput +} +var file_xds_type_matcher_v3_http_inputs_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method 
input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_http_inputs_proto_init() } +func file_xds_type_matcher_v3_http_inputs_proto_init() { + if File_xds_type_matcher_v3_http_inputs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpAttributesCelMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_http_inputs_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_http_inputs_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_http_inputs_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_http_inputs_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_http_inputs_proto = out.File + file_xds_type_matcher_v3_http_inputs_proto_rawDesc = nil + file_xds_type_matcher_v3_http_inputs_proto_goTypes = nil + file_xds_type_matcher_v3_http_inputs_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go new file mode 100644 index 000000000..5d8742927 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/http_inputs.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpAttributesCelMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpAttributesCelMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpAttributesCelMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpAttributesCelMatchInputMultiError, or nil if none found. +func (m *HttpAttributesCelMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpAttributesCelMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpAttributesCelMatchInputMultiError(errors) + } + + return nil +} + +// HttpAttributesCelMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpAttributesCelMatchInput.ValidateAll() if +// the designated constraints aren't met. 
+type HttpAttributesCelMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpAttributesCelMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpAttributesCelMatchInputMultiError) AllErrors() []error { return m } + +// HttpAttributesCelMatchInputValidationError is the validation error returned +// by HttpAttributesCelMatchInput.Validate if the designated constraints +// aren't met. +type HttpAttributesCelMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpAttributesCelMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpAttributesCelMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpAttributesCelMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpAttributesCelMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpAttributesCelMatchInputValidationError) ErrorName() string { + return "HttpAttributesCelMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpAttributesCelMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpAttributesCelMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpAttributesCelMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpAttributesCelMatchInputValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go new file mode 100644 index 000000000..fdb659946 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go @@ -0,0 +1,256 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/ip.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type IPMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*IPMatcher_IPRangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *IPMatcher) Reset() { + *x = IPMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPMatcher) ProtoMessage() {} + +func (x *IPMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPMatcher.ProtoReflect.Descriptor instead. +func (*IPMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0} +} + +func (x *IPMatcher) GetRangeMatchers() []*IPMatcher_IPRangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type IPMatcher_IPRangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.CidrRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` + Exclusive bool `protobuf:"varint,3,opt,name=exclusive,proto3" json:"exclusive,omitempty"` +} + +func (x *IPMatcher_IPRangeMatcher) Reset() { + *x = IPMatcher_IPRangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPMatcher_IPRangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPMatcher_IPRangeMatcher) ProtoMessage() {} + +func (x *IPMatcher_IPRangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPMatcher_IPRangeMatcher.ProtoReflect.Descriptor instead. 
+func (*IPMatcher_IPRangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *IPMatcher_IPRangeMatcher) GetRanges() []*v3.CidrRange { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *IPMatcher_IPRangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +func (x *IPMatcher_IPRangeMatcher) GetExclusive() bool { + if x != nil { + return x.Exclusive + } + return false +} + +var File_xds_type_matcher_v3_ip_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_ip_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x69, 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, + 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x09, 0x49, 0x50, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x49, + 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0x0a, + 0x0e, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x38, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x42, 0x66, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, + 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x0e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_ip_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_ip_proto_rawDescData = file_xds_type_matcher_v3_ip_proto_rawDesc +) + +func file_xds_type_matcher_v3_ip_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_ip_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_ip_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_ip_proto_rawDescData) + }) + return file_xds_type_matcher_v3_ip_proto_rawDescData +} + +var file_xds_type_matcher_v3_ip_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_ip_proto_goTypes = []interface{}{ + (*IPMatcher)(nil), // 0: xds.type.matcher.v3.IPMatcher + (*IPMatcher_IPRangeMatcher)(nil), // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher + (*v3.CidrRange)(nil), // 2: xds.core.v3.CidrRange + (*Matcher_OnMatch)(nil), // 3: xds.type.matcher.v3.Matcher.OnMatch +} +var file_xds_type_matcher_v3_ip_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.IPMatcher.range_matchers:type_name -> xds.type.matcher.v3.IPMatcher.IPRangeMatcher + 2, // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.ranges:type_name -> xds.core.v3.CidrRange + 3, // 2: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_ip_proto_init() } +func file_xds_type_matcher_v3_ip_proto_init() { + if File_xds_type_matcher_v3_ip_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_ip_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_ip_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPMatcher_IPRangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_ip_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_ip_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_ip_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_ip_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_ip_proto = out.File + file_xds_type_matcher_v3_ip_proto_rawDesc = nil + file_xds_type_matcher_v3_ip_proto_goTypes = nil + file_xds_type_matcher_v3_ip_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go 
b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go new file mode 100644 index 000000000..c1fca03bc --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/ip.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on IPMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *IPMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on IPMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in IPMatcherMultiError, or nil +// if none found. +func (m *IPMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *IPMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return IPMatcherMultiError(errors) + } + + return nil +} + +// IPMatcherMultiError is an error wrapping multiple validation errors returned +// by IPMatcher.ValidateAll() if the designated constraints aren't met. +type IPMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IPMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IPMatcherMultiError) AllErrors() []error { return m } + +// IPMatcherValidationError is the validation error returned by +// IPMatcher.Validate if the designated constraints aren't met. +type IPMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IPMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e IPMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IPMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IPMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e IPMatcherValidationError) ErrorName() string { return "IPMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e IPMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIPMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IPMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IPMatcherValidationError{} + +// Validate checks the field values on IPMatcher_IPRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *IPMatcher_IPRangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on IPMatcher_IPRangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// IPMatcher_IPRangeMatcherMultiError, or nil if none found. +func (m *IPMatcher_IPRangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *IPMatcher_IPRangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := IPMatcher_IPRangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Exclusive + + if len(errors) > 0 { + return IPMatcher_IPRangeMatcherMultiError(errors) + } + + return nil +} + +// IPMatcher_IPRangeMatcherMultiError is an error wrapping multiple validation +// errors returned by IPMatcher_IPRangeMatcher.ValidateAll() if the designated +// constraints aren't met. +type IPMatcher_IPRangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IPMatcher_IPRangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IPMatcher_IPRangeMatcherMultiError) AllErrors() []error { return m } + +// IPMatcher_IPRangeMatcherValidationError is the validation error returned by +// IPMatcher_IPRangeMatcher.Validate if the designated constraints aren't met. +type IPMatcher_IPRangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IPMatcher_IPRangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e IPMatcher_IPRangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IPMatcher_IPRangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IPMatcher_IPRangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e IPMatcher_IPRangeMatcherValidationError) ErrorName() string { + return "IPMatcher_IPRangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e IPMatcher_IPRangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIPMatcher_IPRangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IPMatcher_IPRangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IPMatcher_IPRangeMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go new file mode 100644 index 000000000..d94b03b55 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go @@ -0,0 +1,1056 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/matcher.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Matcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatcherType: + // + // *Matcher_MatcherList_ + // *Matcher_MatcherTree_ + MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"` + OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"` +} + +func (x *Matcher) Reset() { + *x = Matcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher) ProtoMessage() {} + +func (x *Matcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher.ProtoReflect.Descriptor instead. +func (*Matcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0} +} + +func (m *Matcher) GetMatcherType() isMatcher_MatcherType { + if m != nil { + return m.MatcherType + } + return nil +} + +func (x *Matcher) GetMatcherList() *Matcher_MatcherList { + if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok { + return x.MatcherList + } + return nil +} + +func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree { + if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok { + return x.MatcherTree + } + return nil +} + +func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch { + if x != nil { + return x.OnNoMatch + } + return nil +} + +type isMatcher_MatcherType interface { + isMatcher_MatcherType() +} + +type Matcher_MatcherList_ struct { + MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"` +} + +type Matcher_MatcherTree_ struct { + MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"` +} + +func (*Matcher_MatcherList_) isMatcher_MatcherType() {} + +func (*Matcher_MatcherTree_) isMatcher_MatcherType() {} + +type Matcher_OnMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OnMatch: + // + // *Matcher_OnMatch_Matcher + // *Matcher_OnMatch_Action + OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"` +} + +func (x *Matcher_OnMatch) Reset() { + *x = Matcher_OnMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_OnMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_OnMatch) ProtoMessage() {} + +func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead. 
+func (*Matcher_OnMatch) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch { + if m != nil { + return m.OnMatch + } + return nil +} + +func (x *Matcher_OnMatch) GetMatcher() *Matcher { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Matcher_OnMatch) GetAction() *v3.TypedExtensionConfig { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok { + return x.Action + } + return nil +} + +type isMatcher_OnMatch_OnMatch interface { + isMatcher_OnMatch_OnMatch() +} + +type Matcher_OnMatch_Matcher struct { + Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"` +} + +type Matcher_OnMatch_Action struct { + Action *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"` +} + +func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {} + +func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {} + +type Matcher_MatcherList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (x *Matcher_MatcherList) Reset() { + *x = Matcher_MatcherList{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList) ProtoMessage() {} + +func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher { + if x != nil { + return x.Matchers + } + return nil +} + +type Matcher_MatcherTree struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to TreeType: + // + // *Matcher_MatcherTree_ExactMatchMap + // *Matcher_MatcherTree_PrefixMatchMap + // *Matcher_MatcherTree_CustomMatch + TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"` +} + +func (x *Matcher_MatcherTree) Reset() { + *x = Matcher_MatcherTree{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree) ProtoMessage() {} + +func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Matcher_MatcherTree) GetInput() *v3.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType { + if m != nil { + return m.TreeType + } + return nil +} + +func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok { + return x.ExactMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok { + return x.PrefixMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetCustomMatch() *v3.TypedExtensionConfig { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherTree_TreeType interface { + isMatcher_MatcherTree_TreeType() +} + +type Matcher_MatcherTree_ExactMatchMap struct { + ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_PrefixMatchMap struct { + PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_CustomMatch struct { + CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {} + +type Matcher_MatcherList_Predicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + // Types that are assignable to MatchType: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ + // *Matcher_MatcherList_Predicate_OrMatcher + // *Matcher_MatcherList_Predicate_AndMatcher + // *Matcher_MatcherList_Predicate_NotMatcher + MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"` +} + +func (x *Matcher_MatcherList_Predicate) Reset() { + *x = Matcher_MatcherList_Predicate{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType { + if m != nil { + return m.MatchType + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + return x.SinglePredicate + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok { + return x.OrMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok { + return x.AndMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok { + return x.NotMatcher + } + return nil +} + +type isMatcher_MatcherList_Predicate_MatchType interface { + isMatcher_MatcherList_Predicate_MatchType() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ struct { + SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_OrMatcher struct { + OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_AndMatcher struct { + AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_NotMatcher struct { + NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func 
(*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +type Matcher_MatcherList_FieldMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Matcher_MatcherList_FieldMatcher) Reset() { + *x = Matcher_MatcherList_FieldMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_FieldMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {} + +func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type Matcher_MatcherList_Predicate_SinglePredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to Matcher: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch + // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch + Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"` +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() { + *x = Matcher_MatcherList_Predicate_SinglePredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v3.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *StringMatcher { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + return x.ValueMatch + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v3.TypedExtensionConfig { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface { + isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct { + ValueMatch *StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct { + CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +type Matcher_MatcherList_Predicate_PredicateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() { + *x = Matcher_MatcherList_Predicate_PredicateList{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +type Matcher_MatcherTree_MatchMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Matcher_MatcherTree_MatchMap) Reset() { + *x = Matcher_MatcherTree_MatchMap{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree_MatchMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {} + +func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch { + if x != nil { + return x.Map + } + return nil +} + +var File_xds_type_matcher_v3_matcher_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x78, 
0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 
0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, + 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, + 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 
0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04, + 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, + 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d, + 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, 0x0a, + 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 
0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_matcher_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_matcher_proto_rawDescData = file_xds_type_matcher_v3_matcher_proto_rawDesc +) + +func file_xds_type_matcher_v3_matcher_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_matcher_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_matcher_proto_rawDescData) + }) + return file_xds_type_matcher_v3_matcher_proto_rawDescData +} + +var file_xds_type_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_xds_type_matcher_v3_matcher_proto_goTypes = []interface{}{ + (*Matcher)(nil), // 0: xds.type.matcher.v3.Matcher + (*Matcher_OnMatch)(nil), // 1: xds.type.matcher.v3.Matcher.OnMatch + (*Matcher_MatcherList)(nil), // 2: xds.type.matcher.v3.Matcher.MatcherList + (*Matcher_MatcherTree)(nil), // 3: xds.type.matcher.v3.Matcher.MatcherTree + (*Matcher_MatcherList_Predicate)(nil), // 4: xds.type.matcher.v3.Matcher.MatcherList.Predicate + (*Matcher_MatcherList_FieldMatcher)(nil), // 5: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher + (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 6: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 7: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + (*Matcher_MatcherTree_MatchMap)(nil), // 8: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + nil, // 9: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + (*v3.TypedExtensionConfig)(nil), // 10: xds.core.v3.TypedExtensionConfig + (*StringMatcher)(nil), // 11: xds.type.matcher.v3.StringMatcher +} +var file_xds_type_matcher_v3_matcher_proto_depIdxs = []int32{ + 2, // 0: 
xds.type.matcher.v3.Matcher.matcher_list:type_name -> xds.type.matcher.v3.Matcher.MatcherList + 3, // 1: xds.type.matcher.v3.Matcher.matcher_tree:type_name -> xds.type.matcher.v3.Matcher.MatcherTree + 1, // 2: xds.type.matcher.v3.Matcher.on_no_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 0, // 3: xds.type.matcher.v3.Matcher.OnMatch.matcher:type_name -> xds.type.matcher.v3.Matcher + 10, // 4: xds.type.matcher.v3.Matcher.OnMatch.action:type_name -> xds.core.v3.TypedExtensionConfig + 5, // 5: xds.type.matcher.v3.Matcher.MatcherList.matchers:type_name -> xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher + 10, // 6: xds.type.matcher.v3.Matcher.MatcherTree.input:type_name -> xds.core.v3.TypedExtensionConfig + 8, // 7: xds.type.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + 8, // 8: xds.type.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + 10, // 9: xds.type.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> xds.core.v3.TypedExtensionConfig + 6, // 10: xds.type.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + 7, // 11: xds.type.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 7, // 12: xds.type.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 4, // 13: xds.type.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 4, // 14: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 1, // 15: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 10, // 16: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> xds.core.v3.TypedExtensionConfig + 11, // 17: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> xds.type.matcher.v3.StringMatcher + 10, // 18: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> xds.core.v3.TypedExtensionConfig + 4, // 19: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 9, // 20: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + 1, // 21: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 22, // [22:22] is the sub-list for method output_type + 22, // [22:22] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_matcher_proto_init() } +func file_xds_type_matcher_v3_matcher_proto_init() { + if File_xds_type_matcher_v3_matcher_proto != nil { + return + } + file_xds_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_OnMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_FieldMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree_MatchMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_)(nil), + (*Matcher_MatcherTree_)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Matcher_OnMatch_Matcher)(nil), + (*Matcher_OnMatch_Action)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Matcher_MatcherTree_ExactMatchMap)(nil), + (*Matcher_MatcherTree_PrefixMatchMap)(nil), + (*Matcher_MatcherTree_CustomMatch)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil), + (*Matcher_MatcherList_Predicate_OrMatcher)(nil), + (*Matcher_MatcherList_Predicate_AndMatcher)(nil), + (*Matcher_MatcherList_Predicate_NotMatcher)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil), + (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_xds_type_matcher_v3_matcher_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_matcher_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_matcher_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_matcher_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_matcher_proto = out.File + file_xds_type_matcher_v3_matcher_proto_rawDesc = nil + file_xds_type_matcher_v3_matcher_proto_goTypes = nil + file_xds_type_matcher_v3_matcher_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go new file mode 100644 index 000000000..60b721f5f --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go @@ -0,0 +1,1913 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/matcher.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in MatcherMultiError, or nil if none found. 
+func (m *Matcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetOnNoMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.MatcherType.(type) { + case *Matcher_MatcherList_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcherList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcherTree()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return MatcherMultiError(errors) + } + + return nil +} + +// MatcherMultiError is an error wrapping multiple validation errors returned +// by Matcher.ValidateAll() if the designated constraints aren't met. +type MatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatcherMultiError) AllErrors() []error { return m } + +// MatcherValidationError is the validation error returned by Matcher.Validate +// if the designated constraints aren't met. +type MatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatcherValidationError{} + +// Validate checks the field values on Matcher_OnMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Matcher_OnMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_OnMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_OnMatchMultiError, or nil if none found. 
+func (m *Matcher_OnMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_OnMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOnMatchPresent := false + switch v := m.OnMatch.(type) { + case *Matcher_OnMatch_Matcher: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_OnMatch_Action: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOnMatchPresent { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_OnMatchMultiError(errors) + } + + return nil +} + +// Matcher_OnMatchMultiError is an error wrapping multiple validation errors +// returned by Matcher_OnMatch.ValidateAll() if the designated constraints +// aren't met. +type Matcher_OnMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_OnMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_OnMatchMultiError) AllErrors() []error { return m } + +// Matcher_OnMatchValidationError is the validation error returned by +// Matcher_OnMatch.Validate if the designated constraints aren't met. +type Matcher_OnMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_OnMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_OnMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_OnMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_OnMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" } + +// Error satisfies the builtin error interface +func (e Matcher_OnMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_OnMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_OnMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_OnMatchValidationError{} + +// Validate checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherListMultiError, or nil if none found. +func (m *Matcher_MatcherList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMatchers()) < 1 { + err := Matcher_MatcherListValidationError{ + field: "Matchers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherListMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Matcher_MatcherListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherListValidationError is the validation error returned by +// Matcher_MatcherList.Validate if the designated constraints aren't met. +type Matcher_MatcherListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherListValidationError) ErrorName() string { + return "Matcher_MatcherListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTreeMultiError, or nil if none found. 
+func (m *Matcher_MatcherTree) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTreeTypePresent := false + switch v := m.TreeType.(type) { + case *Matcher_MatcherTree_ExactMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetExactMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_PrefixMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetPrefixMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_CustomMatch: + if v == nil { + err := 
Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTreeTypePresent { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherTreeMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTreeMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherTree.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherTreeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTreeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTreeValidationError is the validation error returned by +// Matcher_MatcherTree.Validate if the designated constraints aren't met. +type Matcher_MatcherTreeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTreeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherTreeValidationError) ErrorName() string { + return "Matcher_MatcherTreeValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTreeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTreeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTreeValidationError{} + +// Validate checks the field values on Matcher_MatcherList_Predicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_Predicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_PredicateMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchTypePresent := false + switch v := m.MatchType.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetSinglePredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_OrMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetOrMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_AndMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetAndMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_NotMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetNotMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchTypePresent { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_PredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple +// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll() +// if the designated constraints aren't met. +type Matcher_MatcherList_PredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_PredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_PredicateValidationError is the validation error +// returned by Matcher_MatcherList_Predicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_PredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_PredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_PredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_PredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_PredicateValidationError{} + +// Validate checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Matcher_MatcherList_FieldMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPredicate() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOnMatch() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Matcher_MatcherList_FieldMatcherMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple +// validation errors returned by +// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_FieldMatcherValidationError is the validation error +// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string { + return "Matcher_MatcherList_FieldMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_FieldMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_FieldMatcherValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the +// designated constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the +// validation error returned by +// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_SinglePredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPredicate()) < 2 { + err := Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: "Predicate", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_PredicateListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation +// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if +// the designated constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_PredicateListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree_MatchMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTree_MatchMapMultiError, or nil if none found. +func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMap()) < 1 { + err := Matcher_MatcherTree_MatchMapValidationError{ + field: "Map", + reason: "value must contain at least 1 pair(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetMap())) + i := 0 + for key := range m.GetMap() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetMap()[key] + _ = val + + // no validation rules for Map[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return Matcher_MatcherTree_MatchMapMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple +// validation 
errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if +// the designated constraints aren't met. +type Matcher_MatcherTree_MatchMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTree_MatchMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTree_MatchMapValidationError is the validation error returned +// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints +// aren't met. +type Matcher_MatcherTree_MatchMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string { + return "Matcher_MatcherTree_MatchMapValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTree_MatchMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTree_MatchMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTree_MatchMapValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go new file mode 100644 index 000000000..2861768da --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go @@ -0,0 +1,539 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/range.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Int64RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*Int64RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *Int64RangeMatcher) Reset() { + *x = Int64RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64RangeMatcher) ProtoMessage() {} + +func (x *Int64RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64RangeMatcher.ProtoReflect.Descriptor instead. +func (*Int64RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64RangeMatcher) GetRangeMatchers() []*Int64RangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type Int32RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*Int32RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *Int32RangeMatcher) Reset() { + *x = Int32RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32RangeMatcher) ProtoMessage() {} + +func (x *Int32RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*Int32RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32RangeMatcher) GetRangeMatchers() []*Int32RangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type DoubleRangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*DoubleRangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *DoubleRangeMatcher) Reset() { + *x = DoubleRangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRangeMatcher) ProtoMessage() {} + +func (x *DoubleRangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRangeMatcher.ProtoReflect.Descriptor instead. +func (*DoubleRangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRangeMatcher) GetRangeMatchers() []*DoubleRangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type Int64RangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.Int64Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Int64RangeMatcher_RangeMatcher) Reset() { + *x = Int64RangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64RangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64RangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *Int64RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*Int64RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Int64RangeMatcher_RangeMatcher) GetRanges() []*v3.Int64Range { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *Int64RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type Int32RangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.Int32Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Int32RangeMatcher_RangeMatcher) Reset() { + *x = Int32RangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32RangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32RangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *Int32RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. +func (*Int32RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Int32RangeMatcher_RangeMatcher) GetRanges() []*v3.Int32Range { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *Int32RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type DoubleRangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.DoubleRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *DoubleRangeMatcher_RangeMatcher) Reset() { + *x = DoubleRangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *DoubleRangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*DoubleRangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DoubleRangeMatcher_RangeMatcher) GetRanges() []*v3.DoubleRange { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *DoubleRangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +var File_xds_type_matcher_v3_range_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x01, 0x0a, 0x11, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, + 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, + 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xfc, 0x01, 0x0a, 0x11, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x65, 0x72, 0x52, 0x0d, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, 0x0a, + 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, + 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xff, 0x01, 0x0a, 0x12, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x5b, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8b, 0x01, + 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3a, + 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x5a, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_range_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_range_proto_rawDescData = file_xds_type_matcher_v3_range_proto_rawDesc +) + +func file_xds_type_matcher_v3_range_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_range_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_range_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_range_proto_rawDescData) + }) + return file_xds_type_matcher_v3_range_proto_rawDescData +} + +var file_xds_type_matcher_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_xds_type_matcher_v3_range_proto_goTypes = []interface{}{ + (*Int64RangeMatcher)(nil), // 0: xds.type.matcher.v3.Int64RangeMatcher + (*Int32RangeMatcher)(nil), // 1: xds.type.matcher.v3.Int32RangeMatcher + (*DoubleRangeMatcher)(nil), // 2: xds.type.matcher.v3.DoubleRangeMatcher + (*Int64RangeMatcher_RangeMatcher)(nil), // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher + (*Int32RangeMatcher_RangeMatcher)(nil), // 4: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher + (*DoubleRangeMatcher_RangeMatcher)(nil), // 5: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher + (*v3.Int64Range)(nil), // 6: xds.type.v3.Int64Range + (*Matcher_OnMatch)(nil), // 7: xds.type.matcher.v3.Matcher.OnMatch + (*v3.Int32Range)(nil), // 8: xds.type.v3.Int32Range + (*v3.DoubleRange)(nil), // 9: xds.type.v3.DoubleRange +} +var file_xds_type_matcher_v3_range_proto_depIdxs = []int32{ + 3, // 0: xds.type.matcher.v3.Int64RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher + 4, // 1: xds.type.matcher.v3.Int32RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher + 5, // 2: xds.type.matcher.v3.DoubleRangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher + 6, // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int64Range + 7, // 4: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 8, // 5: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int32Range + 7, // 6: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 9, // 7: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.DoubleRange + 7, // 8: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_range_proto_init() } +func file_xds_type_matcher_v3_range_proto_init() { + if File_xds_type_matcher_v3_range_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_xds_type_matcher_v3_range_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64RangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32RangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_range_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_range_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_range_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_range_proto = out.File + file_xds_type_matcher_v3_range_proto_rawDesc = nil + file_xds_type_matcher_v3_range_proto_goTypes = nil + file_xds_type_matcher_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go new file mode 100644 index 000000000..8cb598643 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go @@ -0,0 +1,975 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/range.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64RangeMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Int64RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64RangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Int64RangeMatcherMultiError, or nil if none found. 
+func (m *Int64RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Int64RangeMatcherMultiError(errors) + } + + return nil +} + +// Int64RangeMatcherMultiError is an error wrapping multiple validation errors +// returned by Int64RangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type Int64RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMatcherMultiError) AllErrors() []error { return m } + +// Int64RangeMatcherValidationError is the validation error returned by +// Int64RangeMatcher.Validate if the designated constraints aren't met. +type Int64RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeMatcherValidationError) ErrorName() string { + return "Int64RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int64RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeMatcherValidationError{} + +// Validate checks the field values on Int32RangeMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *Int32RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32RangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Int32RangeMatcherMultiError, or nil if none found. +func (m *Int32RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Int32RangeMatcherMultiError(errors) + } + + return nil +} + +// Int32RangeMatcherMultiError is an error wrapping multiple validation errors +// returned by Int32RangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type Int32RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMatcherMultiError) AllErrors() []error { return m } + +// Int32RangeMatcherValidationError is the validation error returned by +// Int32RangeMatcher.Validate if the designated constraints aren't met. +type Int32RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Int32RangeMatcherValidationError) ErrorName() string { + return "Int32RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int32RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeMatcherValidationError{} + +// Validate checks the field values on DoubleRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DoubleRangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DoubleRangeMatcherMultiError, or nil if none found. +func (m *DoubleRangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return DoubleRangeMatcherMultiError(errors) + } + + return nil +} + +// DoubleRangeMatcherMultiError is an error wrapping multiple validation errors +// returned by DoubleRangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type DoubleRangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMatcherMultiError) AllErrors() []error { return m } + +// DoubleRangeMatcherValidationError is the validation error returned by +// DoubleRangeMatcher.Validate if the designated constraints aren't met. +type DoubleRangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e DoubleRangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeMatcherValidationError) ErrorName() string { + return "DoubleRangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e DoubleRangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeMatcherValidationError{} + +// Validate checks the field values on Int64RangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Int64RangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64RangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Int64RangeMatcher_RangeMatcherMultiError, or nil if none found. +func (m *Int64RangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64RangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := Int64RangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Int64RangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// Int64RangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by Int64RangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type Int64RangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// Int64RangeMatcher_RangeMatcherValidationError is the validation error +// returned by Int64RangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type Int64RangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "Int64RangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int64RangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64RangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeMatcher_RangeMatcherValidationError{} + +// Validate checks the field values on Int32RangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Int32RangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32RangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Int32RangeMatcher_RangeMatcherMultiError, or nil if none found. 
+func (m *Int32RangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32RangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := Int32RangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Int32RangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// Int32RangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by Int32RangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type Int32RangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// Int32RangeMatcher_RangeMatcherValidationError is the validation error +// returned by Int32RangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type Int32RangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Int32RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "Int32RangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int32RangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32RangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeMatcher_RangeMatcherValidationError{} + +// Validate checks the field values on DoubleRangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DoubleRangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// DoubleRangeMatcher_RangeMatcherMultiError, or nil if none found. +func (m *DoubleRangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := DoubleRangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + 
errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DoubleRangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// DoubleRangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by DoubleRangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type DoubleRangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// DoubleRangeMatcher_RangeMatcherValidationError is the validation error +// returned by DoubleRangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type DoubleRangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "DoubleRangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e DoubleRangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeMatcher_RangeMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go new file mode 100644 index 000000000..3dcf303ac --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/regex.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RegexMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to EngineType: + // + // *RegexMatcher_GoogleRe2 + EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"` + Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"` +} + +func (x *RegexMatcher) Reset() { + *x = RegexMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher) ProtoMessage() {} + +func (x *RegexMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead. +func (*RegexMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0} +} + +func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType { + if m != nil { + return m.EngineType + } + return nil +} + +func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 { + if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok { + return x.GoogleRe2 + } + return nil +} + +func (x *RegexMatcher) GetRegex() string { + if x != nil { + return x.Regex + } + return "" +} + +type isRegexMatcher_EngineType interface { + isRegexMatcher_EngineType() +} + +type RegexMatcher_GoogleRe2 struct { + GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"` +} + +func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {} + +type RegexMatcher_GoogleRE2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RegexMatcher_GoogleRE2) Reset() { + *x = RegexMatcher_GoogleRE2{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher_GoogleRE2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher_GoogleRE2) ProtoMessage() {} + +func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead. 
+func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0} +} + +var File_xds_type_matcher_v3_regex_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_regex_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xa6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x56, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, + 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x0b, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x52, 0x45, 0x32, 0x42, 0x12, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x5a, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65, + 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_regex_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_regex_proto_rawDescData = file_xds_type_matcher_v3_regex_proto_rawDesc +) + +func file_xds_type_matcher_v3_regex_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_regex_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_regex_proto_rawDescData) + }) + return file_xds_type_matcher_v3_regex_proto_rawDescData +} + +var file_xds_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_regex_proto_goTypes = []interface{}{ + (*RegexMatcher)(nil), // 0: xds.type.matcher.v3.RegexMatcher + (*RegexMatcher_GoogleRE2)(nil), // 1: xds.type.matcher.v3.RegexMatcher.GoogleRE2 +} +var file_xds_type_matcher_v3_regex_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.RegexMatcher.google_re2:type_name -> xds.type.matcher.v3.RegexMatcher.GoogleRE2 + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] 
is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_regex_proto_init() } +func file_xds_type_matcher_v3_regex_proto_init() { + if File_xds_type_matcher_v3_regex_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher_GoogleRE2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RegexMatcher_GoogleRe2)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_regex_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_regex_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_regex_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_regex_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_regex_proto = out.File + file_xds_type_matcher_v3_regex_proto_rawDesc = nil + file_xds_type_matcher_v3_regex_proto_goTypes = nil + file_xds_type_matcher_v3_regex_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go new file mode 100644 index 000000000..8b7682964 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/regex.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RegexMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RegexMatcherMultiError, or +// nil if none found. 
+func (m *RegexMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRegex()) < 1 { + err := RegexMatcherValidationError{ + field: "Regex", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofEngineTypePresent := false + switch v := m.EngineType.(type) { + case *RegexMatcher_GoogleRe2: + if v == nil { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofEngineTypePresent = true + + if m.GetGoogleRe2() == nil { + err := RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGoogleRe2()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofEngineTypePresent { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RegexMatcherMultiError(errors) + } + + return nil +} + +// RegexMatcherMultiError is an error wrapping multiple validation errors +// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met. +type RegexMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcherMultiError) AllErrors() []error { return m } + +// RegexMatcherValidationError is the validation error returned by +// RegexMatcher.Validate if the designated constraints aren't met. +type RegexMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e RegexMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcherValidationError{} + +// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher_GoogleRE2) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatcher_GoogleRE2MultiError, or nil if none found. +func (m *RegexMatcher_GoogleRE2) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher_GoogleRE2) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RegexMatcher_GoogleRE2MultiError(errors) + } + + return nil +} + +// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation +// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated +// constraints aren't met. +type RegexMatcher_GoogleRE2MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcher_GoogleRE2MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m } + +// RegexMatcher_GoogleRE2ValidationError is the validation error returned by +// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met. +type RegexMatcher_GoogleRE2ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string { + return "RegexMatcher_GoogleRE2ValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatcher_GoogleRE2ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher_GoogleRE2.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcher_GoogleRE2ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcher_GoogleRE2ValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go new file mode 100644 index 000000000..f9067918c --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go @@ -0,0 +1,353 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/string.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *StringMatcher_Exact + // *StringMatcher_Prefix + // *StringMatcher_Suffix + // *StringMatcher_SafeRegex + // *StringMatcher_Contains + // *StringMatcher_Custom + MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` + IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` +} + +func (x *StringMatcher) Reset() { + *x = StringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMatcher) ProtoMessage() {} + +func (x *StringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. 
+func (*StringMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{0} +} + +func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *StringMatcher) GetExact() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { + return x.Exact + } + return "" +} + +func (x *StringMatcher) GetPrefix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *StringMatcher) GetSuffix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { + return x.Suffix + } + return "" +} + +func (x *StringMatcher) GetSafeRegex() *RegexMatcher { + if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *StringMatcher) GetContains() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok { + return x.Contains + } + return "" +} + +func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig { + if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok { + return x.Custom + } + return nil +} + +func (x *StringMatcher) GetIgnoreCase() bool { + if x != nil { + return x.IgnoreCase + } + return false +} + +type isStringMatcher_MatchPattern interface { + isStringMatcher_MatchPattern() +} + +type StringMatcher_Exact struct { + Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` +} + +type StringMatcher_Prefix struct { + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` +} + +type StringMatcher_Suffix struct { + Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` +} + +type StringMatcher_SafeRegex struct { + SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type StringMatcher_Contains struct { + Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` +} + +type StringMatcher_Custom struct { + Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {} + +type ListStringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *ListStringMatcher) Reset() { + *x = ListStringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListStringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListStringMatcher) ProtoMessage() {} + +func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. 
+func (*ListStringMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{1} +} + +func (x *ListStringMatcher) GetPatterns() []*StringMatcher { + if x != nil { + return x.Patterns + } + return nil +} + +var File_xds_type_matcher_v3_string_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_string_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, + 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, + 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4c, + 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, + 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, + 0x65, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x5d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x08, + 0x70, 0x61, 0x74, 0x74, 
0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x42, 0x5b, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_string_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_string_proto_rawDescData = file_xds_type_matcher_v3_string_proto_rawDesc +) + +func file_xds_type_matcher_v3_string_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_string_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_string_proto_rawDescData) + }) + return file_xds_type_matcher_v3_string_proto_rawDescData +} + +var file_xds_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_string_proto_goTypes = []interface{}{ + (*StringMatcher)(nil), // 0: xds.type.matcher.v3.StringMatcher + (*ListStringMatcher)(nil), // 1: xds.type.matcher.v3.ListStringMatcher + (*RegexMatcher)(nil), // 2: xds.type.matcher.v3.RegexMatcher + (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig +} +var file_xds_type_matcher_v3_string_proto_depIdxs = []int32{ + 2, // 0: xds.type.matcher.v3.StringMatcher.safe_regex:type_name -> xds.type.matcher.v3.RegexMatcher + 3, // 1: xds.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig + 0, // 2: xds.type.matcher.v3.ListStringMatcher.patterns:type_name -> xds.type.matcher.v3.StringMatcher + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_string_proto_init() } +func file_xds_type_matcher_v3_string_proto_init() { + if File_xds_type_matcher_v3_string_proto != nil { + return + } + file_xds_type_matcher_v3_regex_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StringMatcher_Exact)(nil), + (*StringMatcher_Prefix)(nil), + 
(*StringMatcher_Suffix)(nil), + (*StringMatcher_SafeRegex)(nil), + (*StringMatcher_Contains)(nil), + (*StringMatcher_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_string_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_string_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_string_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_string_proto = out.File + file_xds_type_matcher_v3_string_proto_rawDesc = nil + file_xds_type_matcher_v3_string_proto_goTypes = nil + file_xds_type_matcher_v3_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go new file mode 100644 index 000000000..339d3b631 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go @@ -0,0 +1,481 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/string.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StringMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StringMatcherMultiError, or +// nil if none found. 
+func (m *StringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for IgnoreCase + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *StringMatcher_Exact: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + case *StringMatcher_Prefix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetPrefix()) < 1 { + err := StringMatcherValidationError{ + field: "Prefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Suffix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetSuffix()) < 1 { + err := StringMatcherValidationError{ + field: "Suffix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_SafeRegex: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if m.GetSafeRegex() == nil { + err := StringMatcherValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StringMatcher_Contains: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetContains()) < 1 { + err := StringMatcherValidationError{ + field: "Contains", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Custom: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + 
oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetCustom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StringMatcherMultiError(errors) + } + + return nil +} + +// StringMatcherMultiError is an error wrapping multiple validation errors +// returned by StringMatcher.ValidateAll() if the designated constraints +// aren't met. +type StringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StringMatcherMultiError) AllErrors() []error { return m } + +// StringMatcherValidationError is the validation error returned by +// StringMatcher.Validate if the designated constraints aren't met. +type StringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StringMatcherValidationError{} + +// Validate checks the field values on ListStringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListStringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListStringMatcher with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// ListStringMatcherMultiError, or nil if none found. +func (m *ListStringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListStringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPatterns()) < 1 { + err := ListStringMatcherValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListStringMatcherMultiError(errors) + } + + return nil +} + +// ListStringMatcherMultiError is an error wrapping multiple validation errors +// returned by ListStringMatcher.ValidateAll() if the designated constraints +// aren't met. +type ListStringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListStringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListStringMatcherMultiError) AllErrors() []error { return m } + +// ListStringMatcherValidationError is the validation error returned by +// ListStringMatcher.Validate if the designated constraints aren't met. +type ListStringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListStringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListStringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListStringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListStringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListStringMatcherValidationError) ErrorName() string { + return "ListStringMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ListStringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListStringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListStringMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go new file mode 100644 index 000000000..c7d42d4a9 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go @@ -0,0 +1,330 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/cel.proto + +package v3 + +import ( + expr "cel.dev/expr" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CelExpression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ExprSpecifier: + // + // *CelExpression_ParsedExpr + // *CelExpression_CheckedExpr + ExprSpecifier isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"` + CelExprParsed *expr.ParsedExpr `protobuf:"bytes,3,opt,name=cel_expr_parsed,json=celExprParsed,proto3" json:"cel_expr_parsed,omitempty"` + CelExprChecked *expr.CheckedExpr `protobuf:"bytes,4,opt,name=cel_expr_checked,json=celExprChecked,proto3" json:"cel_expr_checked,omitempty"` +} + +func (x *CelExpression) Reset() { + *x = CelExpression{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_cel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelExpression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelExpression) ProtoMessage() {} + +func (x *CelExpression) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_cel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelExpression.ProtoReflect.Descriptor instead. +func (*CelExpression) Descriptor() ([]byte, []int) { + return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{0} +} + +func (m *CelExpression) GetExprSpecifier() isCelExpression_ExprSpecifier { + if m != nil { + return m.ExprSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in xds/type/v3/cel.proto. 
+func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr { + if x, ok := x.GetExprSpecifier().(*CelExpression_ParsedExpr); ok { + return x.ParsedExpr + } + return nil +} + +// Deprecated: Marked as deprecated in xds/type/v3/cel.proto. +func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr { + if x, ok := x.GetExprSpecifier().(*CelExpression_CheckedExpr); ok { + return x.CheckedExpr + } + return nil +} + +func (x *CelExpression) GetCelExprParsed() *expr.ParsedExpr { + if x != nil { + return x.CelExprParsed + } + return nil +} + +func (x *CelExpression) GetCelExprChecked() *expr.CheckedExpr { + if x != nil { + return x.CelExprChecked + } + return nil +} + +type isCelExpression_ExprSpecifier interface { + isCelExpression_ExprSpecifier() +} + +type CelExpression_ParsedExpr struct { + // Deprecated: Marked as deprecated in xds/type/v3/cel.proto. + ParsedExpr *v1alpha1.ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"` +} + +type CelExpression_CheckedExpr struct { + // Deprecated: Marked as deprecated in xds/type/v3/cel.proto. + CheckedExpr *v1alpha1.CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"` +} + +func (*CelExpression_ParsedExpr) isCelExpression_ExprSpecifier() {} + +func (*CelExpression_CheckedExpr) isCelExpression_ExprSpecifier() {} + +type CelExtractString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"` + DefaultValue *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CelExtractString) Reset() { + *x = CelExtractString{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_cel_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelExtractString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelExtractString) ProtoMessage() {} + +func (x *CelExtractString) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_cel_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelExtractString.ProtoReflect.Descriptor instead. 
+func (*CelExtractString) Descriptor() ([]byte, []int) { + return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{1} +} + +func (x *CelExtractString) GetExprExtract() *CelExpression { + if x != nil { + return x.ExprExtract + } + return nil +} + +func (x *CelExtractString) GetDefaultValue() *wrapperspb.StringValue { + if x != nil { + return x.DefaultValue + } + return nil +} + +var File_xds_type_v3_cel_proto protoreflect.FileDescriptor + +var file_xds_type_v3_cel_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x63, 0x65, 0x6c, + 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x02, 0x0a, + 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4b, + 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0c, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3c, 0x0a, 0x0f, 0x63, + 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x65, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 
0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x65, 0x78, + 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, + 0x10, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, + 0x78, 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_cel_proto_rawDescOnce sync.Once + file_xds_type_v3_cel_proto_rawDescData = file_xds_type_v3_cel_proto_rawDesc +) + +func file_xds_type_v3_cel_proto_rawDescGZIP() []byte { + file_xds_type_v3_cel_proto_rawDescOnce.Do(func() { + file_xds_type_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_cel_proto_rawDescData) + }) + return file_xds_type_v3_cel_proto_rawDescData +} + +var file_xds_type_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_v3_cel_proto_goTypes = []interface{}{ + (*CelExpression)(nil), // 0: xds.type.v3.CelExpression + (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString + (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr + (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr + (*expr.ParsedExpr)(nil), // 4: cel.expr.ParsedExpr + (*expr.CheckedExpr)(nil), // 5: cel.expr.CheckedExpr + (*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue +} +var file_xds_type_v3_cel_proto_depIdxs = []int32{ + 2, // 0: xds.type.v3.CelExpression.parsed_expr:type_name -> google.api.expr.v1alpha1.ParsedExpr + 3, // 1: xds.type.v3.CelExpression.checked_expr:type_name -> google.api.expr.v1alpha1.CheckedExpr + 4, // 2: xds.type.v3.CelExpression.cel_expr_parsed:type_name -> cel.expr.ParsedExpr + 5, // 3: xds.type.v3.CelExpression.cel_expr_checked:type_name -> cel.expr.CheckedExpr + 0, // 4: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression + 6, // 5: 
xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_cel_proto_init() } +func file_xds_type_v3_cel_proto_init() { + if File_xds_type_v3_cel_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelExpression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_cel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelExtractString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_v3_cel_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CelExpression_ParsedExpr)(nil), + (*CelExpression_CheckedExpr)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_cel_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_cel_proto_goTypes, + DependencyIndexes: file_xds_type_v3_cel_proto_depIdxs, + MessageInfos: file_xds_type_v3_cel_proto_msgTypes, + }.Build() + File_xds_type_v3_cel_proto = out.File + file_xds_type_v3_cel_proto_rawDesc = nil + file_xds_type_v3_cel_proto_goTypes = nil + file_xds_type_v3_cel_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go new file mode 100644 index 000000000..0855edee9 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/cel.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CelExpression with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CelExpression) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelExpression with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CelExpressionMultiError, or +// nil if none found. 
+func (m *CelExpression) ValidateAll() error { + return m.validate(true) +} + +func (m *CelExpression) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCelExprParsed()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCelExprParsed()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCelExprChecked()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCelExprChecked()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ExprSpecifier.(type) { + case *CelExpression_ParsedExpr: + if v == nil { + err := CelExpressionValidationError{ + field: "ExprSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetParsedExpr()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParsedExpr()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CelExpression_CheckedExpr: + if v == nil { + err := CelExpressionValidationError{ + field: "ExprSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCheckedExpr()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + 
field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCheckedExpr()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return CelExpressionMultiError(errors) + } + + return nil +} + +// CelExpressionMultiError is an error wrapping multiple validation errors +// returned by CelExpression.ValidateAll() if the designated constraints +// aren't met. +type CelExpressionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelExpressionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelExpressionMultiError) AllErrors() []error { return m } + +// CelExpressionValidationError is the validation error returned by +// CelExpression.Validate if the designated constraints aren't met. +type CelExpressionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelExpressionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelExpressionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelExpressionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelExpressionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CelExpressionValidationError) ErrorName() string { return "CelExpressionValidationError" } + +// Error satisfies the builtin error interface +func (e CelExpressionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelExpression.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelExpressionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelExpressionValidationError{} + +// Validate checks the field values on CelExtractString with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CelExtractString) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelExtractString with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CelExtractStringMultiError, or nil if none found. 
+func (m *CelExtractString) ValidateAll() error { + return m.validate(true) +} + +func (m *CelExtractString) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExprExtract() == nil { + err := CelExtractStringValidationError{ + field: "ExprExtract", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExprExtract()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExprExtract()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CelExtractStringMultiError(errors) + } + + return nil +} + +// CelExtractStringMultiError is an error wrapping multiple validation errors +// returned by CelExtractString.ValidateAll() if the designated constraints +// aren't met. +type CelExtractStringMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelExtractStringMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelExtractStringMultiError) AllErrors() []error { return m } + +// CelExtractStringValidationError is the validation error returned by +// CelExtractString.Validate if the designated constraints aren't met. +type CelExtractStringValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelExtractStringValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelExtractStringValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelExtractStringValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelExtractStringValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CelExtractStringValidationError) ErrorName() string { return "CelExtractStringValidationError" } + +// Error satisfies the builtin error interface +func (e CelExtractStringValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelExtractString.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelExtractStringValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelExtractStringValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go new file mode 100644 index 000000000..ca9d3e1b7 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go @@ -0,0 +1,298 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/range.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Int64Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int64Range) Reset() { + *x = Int64Range{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Range) ProtoMessage() {} + +func (x *Int64Range) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead. 
+func (*Int64Range) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64Range) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int64Range) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +type Int32Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int32Range) Reset() { + *x = Int32Range{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Range) ProtoMessage() {} + +func (x *Int32Range) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead. +func (*Int32Range) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32Range) GetStart() int32 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int32Range) GetEnd() int32 { + if x != nil { + return x.End + } + return 0 +} + +type DoubleRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` + End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *DoubleRange) Reset() { + *x = DoubleRange{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRange) ProtoMessage() {} + +func (x *DoubleRange) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead. 
+func (*DoubleRange) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRange) GetStart() float64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *DoubleRange) GetEnd() float64 { + if x != nil { + return x.End + } + return 0 +} + +var File_xds_type_v3_range_proto protoreflect.FileDescriptor + +var file_xds_type_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x4a, 0x0a, 0x16, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_range_proto_rawDescOnce sync.Once + file_xds_type_v3_range_proto_rawDescData = file_xds_type_v3_range_proto_rawDesc +) + +func file_xds_type_v3_range_proto_rawDescGZIP() []byte { + file_xds_type_v3_range_proto_rawDescOnce.Do(func() { + file_xds_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_range_proto_rawDescData) + }) + return file_xds_type_v3_range_proto_rawDescData +} + +var file_xds_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_xds_type_v3_range_proto_goTypes = []interface{}{ + (*Int64Range)(nil), // 0: xds.type.v3.Int64Range + (*Int32Range)(nil), // 1: xds.type.v3.Int32Range + (*DoubleRange)(nil), // 2: xds.type.v3.DoubleRange +} +var file_xds_type_v3_range_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_range_proto_init() } +func file_xds_type_v3_range_proto_init() { + if File_xds_type_v3_range_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Range); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_range_proto_goTypes, + DependencyIndexes: file_xds_type_v3_range_proto_depIdxs, + MessageInfos: file_xds_type_v3_range_proto_msgTypes, + }.Build() + File_xds_type_v3_range_proto = out.File + file_xds_type_v3_range_proto_rawDesc = nil + file_xds_type_v3_range_proto_goTypes = nil + file_xds_type_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go new file mode 100644 index 000000000..ccaf418e5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go @@ -0,0 +1,345 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/range.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int64Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int64RangeMultiError, or +// nil if none found. +func (m *Int64Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int64RangeMultiError(errors) + } + + return nil +} + +// Int64RangeMultiError is an error wrapping multiple validation errors +// returned by Int64Range.ValidateAll() if the designated constraints aren't met. +type Int64RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Int64RangeMultiError) AllErrors() []error { return m } + +// Int64RangeValidationError is the validation error returned by +// Int64Range.Validate if the designated constraints aren't met. +type Int64RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int64RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeValidationError{} + +// Validate checks the field values on Int32Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int32Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int32RangeMultiError, or +// nil if none found. +func (m *Int32Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int32RangeMultiError(errors) + } + + return nil +} + +// Int32RangeMultiError is an error wrapping multiple validation errors +// returned by Int32Range.ValidateAll() if the designated constraints aren't met. +type Int32RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMultiError) AllErrors() []error { return m } + +// Int32RangeValidationError is the validation error returned by +// Int32Range.Validate if the designated constraints aren't met. +type Int32RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e Int32RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int32RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeValidationError{} + +// Validate checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleRangeMultiError, or +// nil if none found. +func (m *DoubleRange) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return DoubleRangeMultiError(errors) + } + + return nil +} + +// DoubleRangeMultiError is an error wrapping multiple validation errors +// returned by DoubleRange.ValidateAll() if the designated constraints aren't met. +type DoubleRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMultiError) AllErrors() []error { return m } + +// DoubleRangeValidationError is the validation error returned by +// DoubleRange.Validate if the designated constraints aren't met. +type DoubleRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go new file mode 100644 index 000000000..72ec85ed6 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/typed_struct.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypedStruct) Reset() { + *x = TypedStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedStruct) ProtoMessage() {} + +func (x *TypedStruct) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead. 
+func (*TypedStruct) Descriptor() ([]byte, []int) { + return file_xds_type_v3_typed_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedStruct) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *TypedStruct) GetValue() *structpb.Struct { + if x != nil { + return x.Value + } + return nil +} + +var File_xds_type_v3_typed_struct_proto protoreflect.FileDescriptor + +var file_xds_type_v3_typed_struct_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0b, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, + 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_typed_struct_proto_rawDescOnce sync.Once + file_xds_type_v3_typed_struct_proto_rawDescData = file_xds_type_v3_typed_struct_proto_rawDesc +) + +func file_xds_type_v3_typed_struct_proto_rawDescGZIP() []byte { + file_xds_type_v3_typed_struct_proto_rawDescOnce.Do(func() { + file_xds_type_v3_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_typed_struct_proto_rawDescData) + }) + return file_xds_type_v3_typed_struct_proto_rawDescData +} + +var file_xds_type_v3_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_v3_typed_struct_proto_goTypes = []interface{}{ + (*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_xds_type_v3_typed_struct_proto_depIdxs = []int32{ + 1, // 0: xds.type.v3.TypedStruct.value:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_typed_struct_proto_init() } +func file_xds_type_v3_typed_struct_proto_init() { + if File_xds_type_v3_typed_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedStruct); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_typed_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_typed_struct_proto_goTypes, + DependencyIndexes: file_xds_type_v3_typed_struct_proto_depIdxs, + MessageInfos: file_xds_type_v3_typed_struct_proto_msgTypes, + }.Build() + File_xds_type_v3_typed_struct_proto = out.File + file_xds_type_v3_typed_struct_proto_rawDesc = nil + file_xds_type_v3_typed_struct_proto_goTypes = nil + file_xds_type_v3_typed_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go new file mode 100644 index 000000000..f39bce906 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/typed_struct.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TypedStruct) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TypedStructMultiError, or +// nil if none found. +func (m *TypedStruct) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedStruct) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TypedStructMultiError(errors) + } + + return nil +} + +// TypedStructMultiError is an error wrapping multiple validation errors +// returned by TypedStruct.ValidateAll() if the designated constraints aren't met. 
+type TypedStructMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedStructMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedStructMultiError) AllErrors() []error { return m } + +// TypedStructValidationError is the validation error returned by +// TypedStruct.Validate if the designated constraints aren't met. +type TypedStructValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedStructValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedStructValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedStructValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedStructValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" } + +// Error satisfies the builtin error interface +func (e TypedStructValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedStruct.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedStructValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedStructValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/LICENSE b/vendor/github.com/envoyproxy/go-control-plane/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go new file mode 100644 index 000000000..13d644dba --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go @@ -0,0 +1,607 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of certificate details. Admin endpoint uses this wrapper for “/certs“ to +// display certificate information. See :ref:`/certs ` for more +// information. +type Certificates struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of certificates known to an Envoy. + Certificates []*Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` +} + +func (x *Certificates) Reset() { + *x = Certificates{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificates) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificates) ProtoMessage() {} + +func (x *Certificates) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificates.ProtoReflect.Descriptor instead. +func (*Certificates) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{0} +} + +func (x *Certificates) GetCertificates() []*Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +type Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Details of CA certificate. + CaCert []*CertificateDetails `protobuf:"bytes,1,rep,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` + // Details of Certificate Chain + CertChain []*CertificateDetails `protobuf:"bytes,2,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` +} + +func (x *Certificate) Reset() { + *x = Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificate) ProtoMessage() {} + +func (x *Certificate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. +func (*Certificate) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{1} +} + +func (x *Certificate) GetCaCert() []*CertificateDetails { + if x != nil { + return x.CaCert + } + return nil +} + +func (x *Certificate) GetCertChain() []*CertificateDetails { + if x != nil { + return x.CertChain + } + return nil +} + +// [#next-free-field: 8] +type CertificateDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path of the certificate. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // Certificate Serial Number. + SerialNumber string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // List of Subject Alternate names. 
+ SubjectAltNames []*SubjectAlternateName `protobuf:"bytes,3,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"` + // Minimum of days until expiration of certificate and it's chain. + DaysUntilExpiration uint64 `protobuf:"varint,4,opt,name=days_until_expiration,json=daysUntilExpiration,proto3" json:"days_until_expiration,omitempty"` + // Indicates the time from which the certificate is valid. + ValidFrom *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"` + // Indicates the time at which the certificate expires. + ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"` + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails *CertificateDetails_OcspDetails `protobuf:"bytes,7,opt,name=ocsp_details,json=ocspDetails,proto3" json:"ocsp_details,omitempty"` +} + +func (x *CertificateDetails) Reset() { + *x = CertificateDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateDetails) ProtoMessage() {} + +func (x *CertificateDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateDetails.ProtoReflect.Descriptor instead. +func (*CertificateDetails) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2} +} + +func (x *CertificateDetails) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CertificateDetails) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *CertificateDetails) GetSubjectAltNames() []*SubjectAlternateName { + if x != nil { + return x.SubjectAltNames + } + return nil +} + +func (x *CertificateDetails) GetDaysUntilExpiration() uint64 { + if x != nil { + return x.DaysUntilExpiration + } + return 0 +} + +func (x *CertificateDetails) GetValidFrom() *timestamppb.Timestamp { + if x != nil { + return x.ValidFrom + } + return nil +} + +func (x *CertificateDetails) GetExpirationTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpirationTime + } + return nil +} + +func (x *CertificateDetails) GetOcspDetails() *CertificateDetails_OcspDetails { + if x != nil { + return x.OcspDetails + } + return nil +} + +type SubjectAlternateName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Subject Alternate Name. 
+ // + // Types that are assignable to Name: + // + // *SubjectAlternateName_Dns + // *SubjectAlternateName_Uri + // *SubjectAlternateName_IpAddress + Name isSubjectAlternateName_Name `protobuf_oneof:"name"` +} + +func (x *SubjectAlternateName) Reset() { + *x = SubjectAlternateName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubjectAlternateName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternateName) ProtoMessage() {} + +func (x *SubjectAlternateName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternateName.ProtoReflect.Descriptor instead. +func (*SubjectAlternateName) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{3} +} + +func (m *SubjectAlternateName) GetName() isSubjectAlternateName_Name { + if m != nil { + return m.Name + } + return nil +} + +func (x *SubjectAlternateName) GetDns() string { + if x, ok := x.GetName().(*SubjectAlternateName_Dns); ok { + return x.Dns + } + return "" +} + +func (x *SubjectAlternateName) GetUri() string { + if x, ok := x.GetName().(*SubjectAlternateName_Uri); ok { + return x.Uri + } + return "" +} + +func (x *SubjectAlternateName) GetIpAddress() string { + if x, ok := x.GetName().(*SubjectAlternateName_IpAddress); ok { + return x.IpAddress + } + return "" +} + +type isSubjectAlternateName_Name interface { + isSubjectAlternateName_Name() +} + +type SubjectAlternateName_Dns struct { + Dns string `protobuf:"bytes,1,opt,name=dns,proto3,oneof"` +} + +type SubjectAlternateName_Uri struct { + Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"` +} + +type SubjectAlternateName_IpAddress struct { + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3,oneof"` +} + +func (*SubjectAlternateName_Dns) isSubjectAlternateName_Name() {} + +func (*SubjectAlternateName_Uri) isSubjectAlternateName_Name() {} + +func (*SubjectAlternateName_IpAddress) isSubjectAlternateName_Name() {} + +type CertificateDetails_OcspDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates the time from which the OCSP response is valid. + ValidFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"` + // Indicates the time at which the OCSP response expires. 
+ Expiration *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *CertificateDetails_OcspDetails) Reset() { + *x = CertificateDetails_OcspDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateDetails_OcspDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateDetails_OcspDetails) ProtoMessage() {} + +func (x *CertificateDetails_OcspDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateDetails_OcspDetails.ProtoReflect.Descriptor instead. +func (*CertificateDetails_OcspDetails) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamppb.Timestamp { + if x != nil { + return x.ValidFrom + } + return nil +} + +func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +var File_envoy_admin_v3_certs_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_certs_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x78, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, + 0x3f, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0b, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 
0x61, 0x69, 0x6c, 0x73, 0x52, 0x06, + 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x09, + 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x22, 0xdc, 0x04, 0x0a, 0x12, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x75, 0x6e, 0x74, 0x69, + 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x45, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72, + 0x6f, 0x6d, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0b, 0x6f, + 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0b, 0x4f, + 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x98, 0x01, 0x0a, 0x14, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x12, 0x0a, + 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, + 0x69, 0x12, 0x1f, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x73, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x43, 0x65, 0x72, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_certs_proto_rawDescOnce sync.Once + file_envoy_admin_v3_certs_proto_rawDescData = file_envoy_admin_v3_certs_proto_rawDesc +) + +func file_envoy_admin_v3_certs_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_certs_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_certs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_certs_proto_rawDescData) + }) + return file_envoy_admin_v3_certs_proto_rawDescData +} + +var file_envoy_admin_v3_certs_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_admin_v3_certs_proto_goTypes = []interface{}{ + (*Certificates)(nil), // 0: envoy.admin.v3.Certificates + (*Certificate)(nil), // 1: envoy.admin.v3.Certificate + (*CertificateDetails)(nil), // 2: envoy.admin.v3.CertificateDetails + (*SubjectAlternateName)(nil), // 3: envoy.admin.v3.SubjectAlternateName + (*CertificateDetails_OcspDetails)(nil), // 4: 
envoy.admin.v3.CertificateDetails.OcspDetails + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_envoy_admin_v3_certs_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Certificates.certificates:type_name -> envoy.admin.v3.Certificate + 2, // 1: envoy.admin.v3.Certificate.ca_cert:type_name -> envoy.admin.v3.CertificateDetails + 2, // 2: envoy.admin.v3.Certificate.cert_chain:type_name -> envoy.admin.v3.CertificateDetails + 3, // 3: envoy.admin.v3.CertificateDetails.subject_alt_names:type_name -> envoy.admin.v3.SubjectAlternateName + 5, // 4: envoy.admin.v3.CertificateDetails.valid_from:type_name -> google.protobuf.Timestamp + 5, // 5: envoy.admin.v3.CertificateDetails.expiration_time:type_name -> google.protobuf.Timestamp + 4, // 6: envoy.admin.v3.CertificateDetails.ocsp_details:type_name -> envoy.admin.v3.CertificateDetails.OcspDetails + 5, // 7: envoy.admin.v3.CertificateDetails.OcspDetails.valid_from:type_name -> google.protobuf.Timestamp + 5, // 8: envoy.admin.v3.CertificateDetails.OcspDetails.expiration:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_certs_proto_init() } +func file_envoy_admin_v3_certs_proto_init() { + if File_envoy_admin_v3_certs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_certs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificates); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubjectAlternateName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateDetails_OcspDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_admin_v3_certs_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*SubjectAlternateName_Dns)(nil), + (*SubjectAlternateName_Uri)(nil), + (*SubjectAlternateName_IpAddress)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_certs_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_certs_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_certs_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_certs_proto_msgTypes, + }.Build() + 
File_envoy_admin_v3_certs_proto = out.File + file_envoy_admin_v3_certs_proto_rawDesc = nil + file_envoy_admin_v3_certs_proto_goTypes = nil + file_envoy_admin_v3_certs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go new file mode 100644 index 000000000..413895689 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go @@ -0,0 +1,870 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Certificates with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Certificates) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Certificates with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CertificatesMultiError, or +// nil if none found. +func (m *Certificates) ValidateAll() error { + return m.validate(true) +} + +func (m *Certificates) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetCertificates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CertificatesMultiError(errors) + } + + return nil +} + +// CertificatesMultiError is an error wrapping multiple validation errors +// returned by Certificates.ValidateAll() if the designated constraints aren't met. +type CertificatesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificatesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m CertificatesMultiError) AllErrors() []error { return m } + +// CertificatesValidationError is the validation error returned by +// Certificates.Validate if the designated constraints aren't met. +type CertificatesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificatesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificatesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificatesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificatesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificatesValidationError) ErrorName() string { return "CertificatesValidationError" } + +// Error satisfies the builtin error interface +func (e CertificatesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificates.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificatesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificatesValidationError{} + +// Validate checks the field values on Certificate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Certificate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Certificate with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CertificateMultiError, or +// nil if none found. 
+func (m *Certificate) ValidateAll() error { + return m.validate(true) +} + +func (m *Certificate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetCaCert() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetCertChain() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CertificateMultiError(errors) + } + + return nil +} + +// CertificateMultiError is an error wrapping multiple validation errors +// returned by Certificate.ValidateAll() if the designated constraints aren't met. +type CertificateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateMultiError) AllErrors() []error { return m } + +// CertificateValidationError is the validation error returned by +// Certificate.Validate if the designated constraints aren't met. +type CertificateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateValidationError) ErrorName() string { return "CertificateValidationError" } + +// Error satisfies the builtin error interface +func (e CertificateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationError{} + +// Validate checks the field values on CertificateDetails with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateDetails with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateDetailsMultiError, or nil if none found. +func (m *CertificateDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Path + + // no validation rules for SerialNumber + + for idx, item := range m.GetSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for DaysUntilExpiration + + if all { + switch v := interface{}(m.GetValidFrom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpirationTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded 
message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpirationTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOcspDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcspDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CertificateDetailsMultiError(errors) + } + + return nil +} + +// CertificateDetailsMultiError is an error wrapping multiple validation errors +// returned by CertificateDetails.ValidateAll() if the designated constraints +// aren't met. +type CertificateDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateDetailsMultiError) AllErrors() []error { return m } + +// CertificateDetailsValidationError is the validation error returned by +// CertificateDetails.Validate if the designated constraints aren't met. +type CertificateDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateDetailsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateDetailsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateDetailsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateDetailsValidationError) ErrorName() string { + return "CertificateDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateDetailsValidationError{} + +// Validate checks the field values on SubjectAlternateName with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubjectAlternateName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubjectAlternateName with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubjectAlternateNameMultiError, or nil if none found. +func (m *SubjectAlternateName) ValidateAll() error { + return m.validate(true) +} + +func (m *SubjectAlternateName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Name.(type) { + case *SubjectAlternateName_Dns: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Dns + case *SubjectAlternateName_Uri: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Uri + case *SubjectAlternateName_IpAddress: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for IpAddress + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SubjectAlternateNameMultiError(errors) + } + + return nil +} + +// SubjectAlternateNameMultiError is an error wrapping multiple validation +// errors returned by SubjectAlternateName.ValidateAll() if the designated +// constraints aren't met. +type SubjectAlternateNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SubjectAlternateNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubjectAlternateNameMultiError) AllErrors() []error { return m } + +// SubjectAlternateNameValidationError is the validation error returned by +// SubjectAlternateName.Validate if the designated constraints aren't met. +type SubjectAlternateNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubjectAlternateNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e SubjectAlternateNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubjectAlternateNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubjectAlternateNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubjectAlternateNameValidationError) ErrorName() string { + return "SubjectAlternateNameValidationError" +} + +// Error satisfies the builtin error interface +func (e SubjectAlternateNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubjectAlternateName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubjectAlternateNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubjectAlternateNameValidationError{} + +// Validate checks the field values on CertificateDetails_OcspDetails with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateDetails_OcspDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateDetails_OcspDetails with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// CertificateDetails_OcspDetailsMultiError, or nil if none found. +func (m *CertificateDetails_OcspDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateDetails_OcspDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetValidFrom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpiration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpiration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CertificateDetails_OcspDetailsMultiError(errors) + } + + return nil +} + +// CertificateDetails_OcspDetailsMultiError is an error wrapping multiple +// validation errors returned by CertificateDetails_OcspDetails.ValidateAll() +// if the designated constraints aren't met. +type CertificateDetails_OcspDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateDetails_OcspDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateDetails_OcspDetailsMultiError) AllErrors() []error { return m } + +// CertificateDetails_OcspDetailsValidationError is the validation error +// returned by CertificateDetails_OcspDetails.Validate if the designated +// constraints aren't met. +type CertificateDetails_OcspDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateDetails_OcspDetailsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateDetails_OcspDetailsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateDetails_OcspDetailsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateDetails_OcspDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateDetails_OcspDetailsValidationError) ErrorName() string { + return "CertificateDetails_OcspDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateDetails_OcspDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateDetails_OcspDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateDetails_OcspDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateDetails_OcspDetailsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go new file mode 100644 index 000000000..3c325787d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go @@ -0,0 +1,504 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Certificates) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificates) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificates) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Certificates) > 0 { + for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Certificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Certificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertChain) > 0 { + for iNdEx := len(m.CertChain) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CertChain[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.CaCert) > 0 { + for iNdEx := len(m.CaCert) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CaCert[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails_OcspDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails_OcspDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails_OcspDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Expiration != nil { + size, err := (*timestamppb.Timestamp)(m.Expiration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcspDetails != nil { + size, err := m.OcspDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.ExpirationTime != nil { + size, err := (*timestamppb.Timestamp)(m.ExpirationTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DaysUntilExpiration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DaysUntilExpiration)) + i-- + dAtA[i] = 0x20 + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.SerialNumber) > 0 { + i -= len(m.SerialNumber) + copy(dAtA[i:], m.SerialNumber) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SerialNumber))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAlternateName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Name.(*SubjectAlternateName_IpAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_IpAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_IpAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.IpAddress) + copy(dAtA[i:], m.IpAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.IpAddress))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Certificates) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Certificates) > 0 { + for _, e := range m.Certificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Certificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CaCert) > 0 { + for _, e := range m.CaCert { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CertChain) > 0 { + for _, e := range m.CertChain { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails_OcspDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Expiration != nil { + l = (*timestamppb.Timestamp)(m.Expiration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SerialNumber) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubjectAltNames) > 0 { + for _, e := range m.SubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DaysUntilExpiration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DaysUntilExpiration)) + } + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpirationTime != nil { + l = (*timestamppb.Timestamp)(m.ExpirationTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspDetails != nil { + l = m.OcspDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if vtmsg, ok := m.Name.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_IpAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IpAddress) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go new file mode 100644 index 000000000..06b79187f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go @@ -0,0 +1,744 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Admin endpoint uses this wrapper for “/clusters“ to display cluster status information. +// See :ref:`/clusters ` for more information. +type Clusters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Mapping from cluster name to each cluster's status. + ClusterStatuses []*ClusterStatus `protobuf:"bytes,1,rep,name=cluster_statuses,json=clusterStatuses,proto3" json:"cluster_statuses,omitempty"` +} + +func (x *Clusters) Reset() { + *x = Clusters{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Clusters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Clusters) ProtoMessage() {} + +func (x *Clusters) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Clusters.ProtoReflect.Descriptor instead. +func (*Clusters) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{0} +} + +func (x *Clusters) GetClusterStatuses() []*ClusterStatus { + if x != nil { + return x.ClusterStatuses + } + return nil +} + +// Details an individual cluster's current status. 
+// [#next-free-field: 9] +type ClusterStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the cluster. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Denotes whether this cluster was added via API or configured statically. + AddedViaApi bool `protobuf:"varint,2,opt,name=added_via_api,json=addedViaApi,proto3" json:"added_via_api,omitempty"` + // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “false“, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + SuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,3,opt,name=success_rate_ejection_threshold,json=successRateEjectionThreshold,proto3" json:"success_rate_ejection_threshold,omitempty"` + // Mapping from host address to the host's current status. + HostStatuses []*HostStatus `protobuf:"bytes,4,rep,name=host_statuses,json=hostStatuses,proto3" json:"host_statuses,omitempty"` + // The success rate threshold used in the last interval when only locally originated failures were + // taken into account and externally originated errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + LocalOriginSuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,5,opt,name=local_origin_success_rate_ejection_threshold,json=localOriginSuccessRateEjectionThreshold,proto3" json:"local_origin_success_rate_ejection_threshold,omitempty"` + // :ref:`Circuit breaking ` settings of the cluster. + CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` + // Observability name of the cluster. + ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"` + // The :ref:`EDS service name ` if the cluster is an EDS cluster. 
+ EdsServiceName string `protobuf:"bytes,8,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` +} + +func (x *ClusterStatus) Reset() { + *x = ClusterStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStatus) ProtoMessage() {} + +func (x *ClusterStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead. +func (*ClusterStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{1} +} + +func (x *ClusterStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClusterStatus) GetAddedViaApi() bool { + if x != nil { + return x.AddedViaApi + } + return false +} + +func (x *ClusterStatus) GetSuccessRateEjectionThreshold() *v3.Percent { + if x != nil { + return x.SuccessRateEjectionThreshold + } + return nil +} + +func (x *ClusterStatus) GetHostStatuses() []*HostStatus { + if x != nil { + return x.HostStatuses + } + return nil +} + +func (x *ClusterStatus) GetLocalOriginSuccessRateEjectionThreshold() *v3.Percent { + if x != nil { + return x.LocalOriginSuccessRateEjectionThreshold + } + return nil +} + +func (x *ClusterStatus) GetCircuitBreakers() *v31.CircuitBreakers { + if x != nil { + return x.CircuitBreakers + } + return nil +} + +func (x *ClusterStatus) GetObservabilityName() string { + if x != nil { + return x.ObservabilityName + } + return "" +} + +func (x *ClusterStatus) GetEdsServiceName() string { + if x != nil { + return x.EdsServiceName + } + return "" +} + +// Current state of a particular host. +// [#next-free-field: 10] +type HostStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of this host. + Address *v32.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // List of stats specific to this host. + Stats []*SimpleMetric `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty"` + // The host's current health status. + HealthStatus *HostHealthStatus `protobuf:"bytes,3,opt,name=health_status,json=healthStatus,proto3" json:"health_status,omitempty"` + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “false“, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“, only externally generated errors were used in success rate calculation. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + SuccessRate *v3.Percent `protobuf:"bytes,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` + // The host's weight. 
If not configured, the value defaults to 1. + Weight uint32 `protobuf:"varint,5,opt,name=weight,proto3" json:"weight,omitempty"` + // The hostname of the host, if applicable. + Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` + // The host's priority. If not configured, the value defaults to 0 (highest priority). + Priority uint32 `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"` + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + LocalOriginSuccessRate *v3.Percent `protobuf:"bytes,8,opt,name=local_origin_success_rate,json=localOriginSuccessRate,proto3" json:"local_origin_success_rate,omitempty"` + // locality of the host. + Locality *v32.Locality `protobuf:"bytes,9,opt,name=locality,proto3" json:"locality,omitempty"` +} + +func (x *HostStatus) Reset() { + *x = HostStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HostStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostStatus) ProtoMessage() {} + +func (x *HostStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostStatus.ProtoReflect.Descriptor instead. +func (*HostStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{2} +} + +func (x *HostStatus) GetAddress() *v32.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *HostStatus) GetStats() []*SimpleMetric { + if x != nil { + return x.Stats + } + return nil +} + +func (x *HostStatus) GetHealthStatus() *HostHealthStatus { + if x != nil { + return x.HealthStatus + } + return nil +} + +func (x *HostStatus) GetSuccessRate() *v3.Percent { + if x != nil { + return x.SuccessRate + } + return nil +} + +func (x *HostStatus) GetWeight() uint32 { + if x != nil { + return x.Weight + } + return 0 +} + +func (x *HostStatus) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *HostStatus) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *HostStatus) GetLocalOriginSuccessRate() *v3.Percent { + if x != nil { + return x.LocalOriginSuccessRate + } + return nil +} + +func (x *HostStatus) GetLocality() *v32.Locality { + if x != nil { + return x.Locality + } + return nil +} + +// Health status for a host. +// [#next-free-field: 9] +type HostHealthStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The host is currently failing active health checks. 
+ FailedActiveHealthCheck bool `protobuf:"varint,1,opt,name=failed_active_health_check,json=failedActiveHealthCheck,proto3" json:"failed_active_health_check,omitempty"` + // The host is currently considered an outlier and has been ejected. + FailedOutlierCheck bool `protobuf:"varint,2,opt,name=failed_outlier_check,json=failedOutlierCheck,proto3" json:"failed_outlier_check,omitempty"` + // The host is currently being marked as degraded through active health checking. + FailedActiveDegradedCheck bool `protobuf:"varint,4,opt,name=failed_active_degraded_check,json=failedActiveDegradedCheck,proto3" json:"failed_active_degraded_check,omitempty"` + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + PendingDynamicRemoval bool `protobuf:"varint,5,opt,name=pending_dynamic_removal,json=pendingDynamicRemoval,proto3" json:"pending_dynamic_removal,omitempty"` + // The host has not yet been health checked. + PendingActiveHc bool `protobuf:"varint,6,opt,name=pending_active_hc,json=pendingActiveHc,proto3" json:"pending_active_hc,omitempty"` + // The host should be excluded from panic, spillover, etc. calculations because it was explicitly + // taken out of rotation via protocol signal and is not meant to be routed to. + ExcludedViaImmediateHcFail bool `protobuf:"varint,7,opt,name=excluded_via_immediate_hc_fail,json=excludedViaImmediateHcFail,proto3" json:"excluded_via_immediate_hc_fail,omitempty"` + // The host failed active HC due to timeout. + ActiveHcTimeout bool `protobuf:"varint,8,opt,name=active_hc_timeout,json=activeHcTimeout,proto3" json:"active_hc_timeout,omitempty"` + // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported + // here. + // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] + EdsHealthStatus v32.HealthStatus `protobuf:"varint,3,opt,name=eds_health_status,json=edsHealthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"eds_health_status,omitempty"` +} + +func (x *HostHealthStatus) Reset() { + *x = HostHealthStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HostHealthStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostHealthStatus) ProtoMessage() {} + +func (x *HostHealthStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostHealthStatus.ProtoReflect.Descriptor instead. 
+func (*HostHealthStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{3} +} + +func (x *HostHealthStatus) GetFailedActiveHealthCheck() bool { + if x != nil { + return x.FailedActiveHealthCheck + } + return false +} + +func (x *HostHealthStatus) GetFailedOutlierCheck() bool { + if x != nil { + return x.FailedOutlierCheck + } + return false +} + +func (x *HostHealthStatus) GetFailedActiveDegradedCheck() bool { + if x != nil { + return x.FailedActiveDegradedCheck + } + return false +} + +func (x *HostHealthStatus) GetPendingDynamicRemoval() bool { + if x != nil { + return x.PendingDynamicRemoval + } + return false +} + +func (x *HostHealthStatus) GetPendingActiveHc() bool { + if x != nil { + return x.PendingActiveHc + } + return false +} + +func (x *HostHealthStatus) GetExcludedViaImmediateHcFail() bool { + if x != nil { + return x.ExcludedViaImmediateHcFail + } + return false +} + +func (x *HostHealthStatus) GetActiveHcTimeout() bool { + if x != nil { + return x.ActiveHcTimeout + } + return false +} + +func (x *HostHealthStatus) GetEdsHealthStatus() v32.HealthStatus { + if x != nil { + return x.EdsHealthStatus + } + return v32.HealthStatus(0) +} + +var File_envoy_admin_v3_clusters_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, + 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62, + 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x08, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a, + 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64, + 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x41, 0x70, 0x69, 0x12, 0x5d, + 0x0a, 0x1f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x1c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3f, 0x0a, + 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x75, + 0x0a, 0x2c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x27, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, + 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 
0x10, 0x65, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04, + 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a, + 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 
0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64, + 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_clusters_proto_rawDescOnce sync.Once + file_envoy_admin_v3_clusters_proto_rawDescData = file_envoy_admin_v3_clusters_proto_rawDesc +) + +func file_envoy_admin_v3_clusters_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_clusters_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_clusters_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_clusters_proto_rawDescData) + }) + return file_envoy_admin_v3_clusters_proto_rawDescData +} + +var file_envoy_admin_v3_clusters_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_admin_v3_clusters_proto_goTypes = []interface{}{ + (*Clusters)(nil), // 0: envoy.admin.v3.Clusters + (*ClusterStatus)(nil), // 1: envoy.admin.v3.ClusterStatus + (*HostStatus)(nil), // 2: envoy.admin.v3.HostStatus + (*HostHealthStatus)(nil), // 3: envoy.admin.v3.HostHealthStatus + (*v3.Percent)(nil), // 4: envoy.type.v3.Percent + (*v31.CircuitBreakers)(nil), // 5: envoy.config.cluster.v3.CircuitBreakers + (*v32.Address)(nil), // 6: envoy.config.core.v3.Address + (*SimpleMetric)(nil), // 7: envoy.admin.v3.SimpleMetric + (*v32.Locality)(nil), // 8: envoy.config.core.v3.Locality + (v32.HealthStatus)(0), // 9: envoy.config.core.v3.HealthStatus +} +var file_envoy_admin_v3_clusters_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Clusters.cluster_statuses:type_name -> envoy.admin.v3.ClusterStatus + 4, // 1: envoy.admin.v3.ClusterStatus.success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent + 2, // 2: envoy.admin.v3.ClusterStatus.host_statuses:type_name -> envoy.admin.v3.HostStatus + 4, // 3: envoy.admin.v3.ClusterStatus.local_origin_success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent + 5, // 4: envoy.admin.v3.ClusterStatus.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers + 6, // 5: envoy.admin.v3.HostStatus.address:type_name -> envoy.config.core.v3.Address + 7, // 6: envoy.admin.v3.HostStatus.stats:type_name -> envoy.admin.v3.SimpleMetric + 3, // 7: envoy.admin.v3.HostStatus.health_status:type_name -> envoy.admin.v3.HostHealthStatus + 4, // 8: envoy.admin.v3.HostStatus.success_rate:type_name -> envoy.type.v3.Percent + 4, // 9: envoy.admin.v3.HostStatus.local_origin_success_rate:type_name -> envoy.type.v3.Percent + 8, // 10: envoy.admin.v3.HostStatus.locality:type_name -> envoy.config.core.v3.Locality + 9, // 11: envoy.admin.v3.HostHealthStatus.eds_health_status:type_name -> envoy.config.core.v3.HealthStatus + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_clusters_proto_init() } +func file_envoy_admin_v3_clusters_proto_init() { + if File_envoy_admin_v3_clusters_proto != nil { + return + } + 
file_envoy_admin_v3_metrics_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_clusters_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Clusters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HostStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HostHealthStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_clusters_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_clusters_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_clusters_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_clusters_proto_msgTypes, + }.Build() + File_envoy_admin_v3_clusters_proto = out.File + file_envoy_admin_v3_clusters_proto_rawDesc = nil + file_envoy_admin_v3_clusters_proto_goTypes = nil + file_envoy_admin_v3_clusters_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go new file mode 100644 index 000000000..d7658a09f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go @@ -0,0 +1,803 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.HealthStatus(0) +) + +// Validate checks the field values on Clusters with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Clusters) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Clusters with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClustersMultiError, or nil +// if none found. 
+func (m *Clusters) ValidateAll() error { + return m.validate(true) +} + +func (m *Clusters) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetClusterStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClustersMultiError(errors) + } + + return nil +} + +// ClustersMultiError is an error wrapping multiple validation errors returned +// by Clusters.ValidateAll() if the designated constraints aren't met. +type ClustersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersMultiError) AllErrors() []error { return m } + +// ClustersValidationError is the validation error returned by +// Clusters.Validate if the designated constraints aren't met. +type ClustersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersValidationError) ErrorName() string { return "ClustersValidationError" } + +// Error satisfies the builtin error interface +func (e ClustersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusters.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersValidationError{} + +// Validate checks the field values on ClusterStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStatus with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in ClusterStatusMultiError, or +// nil if none found. +func (m *ClusterStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for AddedViaApi + + if all { + switch v := interface{}(m.GetSuccessRateEjectionThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHostStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCircuitBreakers()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ObservabilityName + + // no validation rules for EdsServiceName + + if len(errors) > 0 { + return ClusterStatusMultiError(errors) + } + + return nil +} + +// ClusterStatusMultiError is an error wrapping multiple validation errors +// returned by ClusterStatus.ValidateAll() if the designated constraints +// aren't met. +type ClusterStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStatusMultiError) AllErrors() []error { return m } + +// ClusterStatusValidationError is the validation error returned by +// ClusterStatus.Validate if the designated constraints aren't met. +type ClusterStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStatusValidationError) ErrorName() string { return "ClusterStatusValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStatusValidationError{} + +// Validate checks the field values on HostStatus with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HostStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HostStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HostStatusMultiError, or +// nil if none found. 
+func (m *HostStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HostStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetHealthStatus()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthStatus()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Weight + + // no validation rules for Hostname + + // no validation rules for Priority + + if all { + 
switch v := interface{}(m.GetLocalOriginSuccessRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalOriginSuccessRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HostStatusMultiError(errors) + } + + return nil +} + +// HostStatusMultiError is an error wrapping multiple validation errors +// returned by HostStatus.ValidateAll() if the designated constraints aren't met. +type HostStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HostStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HostStatusMultiError) AllErrors() []error { return m } + +// HostStatusValidationError is the validation error returned by +// HostStatus.Validate if the designated constraints aren't met. +type HostStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HostStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HostStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HostStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HostStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HostStatusValidationError) ErrorName() string { return "HostStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HostStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHostStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HostStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HostStatusValidationError{} + +// Validate checks the field values on HostHealthStatus with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HostHealthStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HostHealthStatus with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HostHealthStatusMultiError, or nil if none found. +func (m *HostHealthStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HostHealthStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FailedActiveHealthCheck + + // no validation rules for FailedOutlierCheck + + // no validation rules for FailedActiveDegradedCheck + + // no validation rules for PendingDynamicRemoval + + // no validation rules for PendingActiveHc + + // no validation rules for ExcludedViaImmediateHcFail + + // no validation rules for ActiveHcTimeout + + // no validation rules for EdsHealthStatus + + if len(errors) > 0 { + return HostHealthStatusMultiError(errors) + } + + return nil +} + +// HostHealthStatusMultiError is an error wrapping multiple validation errors +// returned by HostHealthStatus.ValidateAll() if the designated constraints +// aren't met. +type HostHealthStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HostHealthStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HostHealthStatusMultiError) AllErrors() []error { return m } + +// HostHealthStatusValidationError is the validation error returned by +// HostHealthStatus.Validate if the designated constraints aren't met. +type HostHealthStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HostHealthStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HostHealthStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HostHealthStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HostHealthStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HostHealthStatusValidationError) ErrorName() string { return "HostHealthStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HostHealthStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHostHealthStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HostHealthStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HostHealthStatusValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go new file mode 100644 index 000000000..418581107 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go @@ -0,0 +1,656 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Clusters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Clusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Clusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStatuses) > 0 { + for iNdEx := len(m.ClusterStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EdsServiceName) > 0 { + i -= len(m.EdsServiceName) + copy(dAtA[i:], m.EdsServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EdsServiceName))) + i-- + dAtA[i] = 0x42 + } + if 
len(m.ObservabilityName) > 0 { + i -= len(m.ObservabilityName) + copy(dAtA[i:], m.ObservabilityName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ObservabilityName))) + i-- + dAtA[i] = 0x3a + } + if m.CircuitBreakers != nil { + if vtmsg, ok := interface{}(m.CircuitBreakers).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CircuitBreakers) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.HostStatuses) > 0 { + for iNdEx := len(m.HostStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HostStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.SuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.AddedViaApi { + i-- + if m.AddedViaApi { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.LocalOriginSuccessRate != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x38 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x32 + } + if m.Weight != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x28 + } + if m.SuccessRate != nil { + if vtmsg, ok := interface{}(m.SuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.HealthStatus != nil { + size, err := m.HealthStatus.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostHealthStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostHealthStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostHealthStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.ActiveHcTimeout { + i-- + if m.ActiveHcTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.ExcludedViaImmediateHcFail { + i-- + if m.ExcludedViaImmediateHcFail { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PendingActiveHc { + i-- + if m.PendingActiveHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.PendingDynamicRemoval { + i-- + if m.PendingDynamicRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.FailedActiveDegradedCheck { + i-- + if m.FailedActiveDegradedCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.EdsHealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EdsHealthStatus)) + i-- + dAtA[i] = 0x18 + } + if m.FailedOutlierCheck { + i-- + if m.FailedOutlierCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedActiveHealthCheck { + i-- + if m.FailedActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Clusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterStatuses) > 0 { + for _, e := range m.ClusterStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AddedViaApi { + n += 2 + } + if m.SuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HostStatuses) > 0 { + for _, e := range m.HostStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + if size, ok := interface{}(m.CircuitBreakers).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CircuitBreakers) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ObservabilityName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EdsServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HealthStatus != nil { + l = m.HealthStatus.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRate != nil { + if size, ok := interface{}(m.SuccessRate).(interface { + SizeVT() int + }); 
ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Weight)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.LocalOriginSuccessRate != nil { + if size, ok := interface{}(m.LocalOriginSuccessRate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostHealthStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedActiveHealthCheck { + n += 2 + } + if m.FailedOutlierCheck { + n += 2 + } + if m.EdsHealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EdsHealthStatus)) + } + if m.FailedActiveDegradedCheck { + n += 2 + } + if m.PendingDynamicRemoval { + n += 2 + } + if m.PendingActiveHc { + n += 2 + } + if m.ExcludedViaImmediateHcFail { + n += 2 + } + if m.ActiveHcTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go new file mode 100644 index 000000000..ef711d966 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go @@ -0,0 +1,642 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The :ref:`/config_dump ` admin endpoint uses this wrapper +// message to maintain and serve arbitrary configuration information from any component in Envoy. +type ConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This list is serialized and dumped in its entirety at the + // :ref:`/config_dump ` endpoint. 
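The MarshalVTStrict/SizeVT helpers in the clusters_vtproto.pb.go hunk above are only compiled when the vtprotobuf build tag is set, and they encode fields back-to-front into a pre-sized buffer. A minimal sketch (illustrative only, not part of the generated file) of preferring that fast path while falling back to reflection-based proto.Marshal, using the same interface-assertion idiom the generated code itself uses for cross-package messages:

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
	"google.golang.org/protobuf/proto"
)

// marshalClusters uses the generated vtprotobuf marshaller when it is compiled in
// (vtprotobuf build tag); otherwise it falls back to proto.Marshal.
func marshalClusters(c *adminv3.Clusters) ([]byte, error) {
	if vt, ok := interface{}(c).(interface{ MarshalVTStrict() ([]byte, error) }); ok {
		return vt.MarshalVTStrict()
	}
	return proto.Marshal(c)
}

func main() {
	b, err := marshalClusters(&adminv3.Clusters{})
	if err != nil {
		panic(err)
	}
	fmt.Println("encoded bytes:", len(b))
}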
+ // + // The following configurations are currently supported and will be dumped in the order given + // below: + // + // * “bootstrap“: :ref:`BootstrapConfigDump ` + // * “clusters“: :ref:`ClustersConfigDump ` + // * “ecds_filter_http“: :ref:`EcdsConfigDump ` + // * “ecds_filter_quic_listener“: :ref:`EcdsConfigDump ` + // * “ecds_filter_tcp_listener“: :ref:`EcdsConfigDump ` + // * “endpoints“: :ref:`EndpointsConfigDump ` + // * “listeners“: :ref:`ListenersConfigDump ` + // * “scoped_routes“: :ref:`ScopedRoutesConfigDump ` + // * “routes“: :ref:`RoutesConfigDump ` + // * “secrets“: :ref:`SecretsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter “?include_eds“ + // + // Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for + // either HTTP or listener filter will only be dumped if it is actually configured. + // + // You can filter output with the resource and mask query parameters. + // See :ref:`/config_dump?resource={} `, + // :ref:`/config_dump?mask={} `, + // or :ref:`/config_dump?resource={},mask={} + // ` for more information. + Configs []*anypb.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` +} + +func (x *ConfigDump) Reset() { + *x = ConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigDump) ProtoMessage() {} + +func (x *ConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigDump.ProtoReflect.Descriptor instead. +func (*ConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0} +} + +func (x *ConfigDump) GetConfigs() []*anypb.Any { + if x != nil { + return x.Configs + } + return nil +} + +// This message describes the bootstrap configuration that Envoy was started with. This includes +// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate +// the static portions of an Envoy configuration by reusing the output as the bootstrap +// configuration for another Envoy. +type BootstrapConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bootstrap *v3.Bootstrap `protobuf:"bytes,1,opt,name=bootstrap,proto3" json:"bootstrap,omitempty"` + // The timestamp when the BootstrapConfig was last updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *BootstrapConfigDump) Reset() { + *x = BootstrapConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapConfigDump) ProtoMessage() {} + +func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapConfigDump.ProtoReflect.Descriptor instead. +func (*BootstrapConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1} +} + +func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap { + if x != nil { + return x.Bootstrap + } + return nil +} + +func (x *BootstrapConfigDump) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. +type SecretsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded secrets. + StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"` + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"` + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. + DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"` +} + +func (x *SecretsConfigDump) Reset() { + *x = SecretsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump) ProtoMessage() {} + +func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead. 
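The ConfigDump wrapper above carries each configuration section as a google.protobuf.Any, so consumers typically unpack Configs by concrete type. A short sketch (illustrative only, not part of the vendored file), assuming a *adminv3.ConfigDump already decoded from the admin endpoint response:

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

func describe(dump *adminv3.ConfigDump) {
	for _, cfg := range dump.GetConfigs() {
		// Resolve each Any to its concrete message via its type URL.
		msg, err := cfg.UnmarshalNew()
		if err != nil {
			fmt.Println("skipping unknown config:", err)
			continue
		}
		switch d := msg.(type) {
		case *adminv3.BootstrapConfigDump:
			fmt.Println("bootstrap last updated:", d.GetLastUpdated().AsTime())
		case *adminv3.SecretsConfigDump:
			fmt.Println("static secrets:", len(d.GetStaticSecrets()))
		}
	}
}

func main() {
	describe(&adminv3.ConfigDump{})
}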
+func (*SecretsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2} +} + +func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret { + if x != nil { + return x.StaticSecrets + } + return nil +} + +func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret { + if x != nil { + return x.DynamicActiveSecrets + } + return nil +} + +func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret { + if x != nil { + return x.DynamicWarmingSecrets + } + return nil +} + +// DynamicSecret contains secret information fetched via SDS. +// [#next-free-field: 7] +type SecretsConfigDump_DynamicSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + Secret *anypb.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The *error_state* field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *SecretsConfigDump_DynamicSecret) Reset() { + *x = SecretsConfigDump_DynamicSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump_DynamicSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {} + +func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SecretsConfigDump_DynamicSecret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetSecret() *anypb.Any { + if x != nil { + return x.Secret + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// StaticSecret specifies statically loaded secret in bootstrap. +type SecretsConfigDump_StaticSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + Secret *anypb.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` +} + +func (x *SecretsConfigDump_StaticSecret) Reset() { + *x = SecretsConfigDump_StaticSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump_StaticSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump_StaticSecret) ProtoMessage() {} + +func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *SecretsConfigDump_StaticSecret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *SecretsConfigDump_StaticSecret) GetSecret() *anypb.Any { + if x != nil { + return x.Secret + } + return nil +} + +var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x42, 0x78, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 
0x33, + 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc +) + +func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_proto_rawDescData +} + +var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{ + (*ConfigDump)(nil), // 0: envoy.admin.v3.ConfigDump + (*BootstrapConfigDump)(nil), // 1: envoy.admin.v3.BootstrapConfigDump + (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump + (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret + (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret + (*anypb.Any)(nil), // 5: google.protobuf.Any + (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState + (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus +} +var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{ + 5, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any + 6, // 1: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap + 7, // 2: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp + 4, // 3: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret + 3, // 4: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 3, // 5: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 7, // 6: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> google.protobuf.Timestamp + 5, // 7: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any + 8, // 8: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 9, // 9: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 7, // 10: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp + 5, // 11: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_config_dump_proto_init() } +func file_envoy_admin_v3_config_dump_proto_init() { + if File_envoy_admin_v3_config_dump_proto != nil { + return + } + file_envoy_admin_v3_config_dump_shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_config_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigDump); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump_DynamicSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump_StaticSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_config_dump_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_config_dump_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_config_dump_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_config_dump_proto_msgTypes, + }.Build() + File_envoy_admin_v3_config_dump_proto = out.File + file_envoy_admin_v3_config_dump_proto_rawDesc = nil + file_envoy_admin_v3_config_dump_proto_goTypes = nil + file_envoy_admin_v3_config_dump_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go new file mode 100644 index 000000000..6f494af0b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go @@ -0,0 +1,893 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ConfigDump with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConfigDump with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ConfigDumpMultiError, or +// nil if none found. 
+func (m *ConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ConfigDumpMultiError(errors) + } + + return nil +} + +// ConfigDumpMultiError is an error wrapping multiple validation errors +// returned by ConfigDump.ValidateAll() if the designated constraints aren't met. +type ConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConfigDumpMultiError) AllErrors() []error { return m } + +// ConfigDumpValidationError is the validation error returned by +// ConfigDump.Validate if the designated constraints aren't met. +type ConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConfigDumpValidationError) ErrorName() string { return "ConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e ConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConfigDumpValidationError{} + +// Validate checks the field values on BootstrapConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *BootstrapConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BootstrapConfigDump with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// BootstrapConfigDumpMultiError, or nil if none found. +func (m *BootstrapConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *BootstrapConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBootstrap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBootstrap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BootstrapConfigDumpMultiError(errors) + } + + return nil +} + +// BootstrapConfigDumpMultiError is an error wrapping multiple validation +// errors returned by BootstrapConfigDump.ValidateAll() if the designated +// constraints aren't met. +type BootstrapConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BootstrapConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BootstrapConfigDumpMultiError) AllErrors() []error { return m } + +// BootstrapConfigDumpValidationError is the validation error returned by +// BootstrapConfigDump.Validate if the designated constraints aren't met. +type BootstrapConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BootstrapConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BootstrapConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BootstrapConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BootstrapConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BootstrapConfigDumpValidationError) ErrorName() string { + return "BootstrapConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e BootstrapConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrapConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BootstrapConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BootstrapConfigDumpValidationError{} + +// Validate checks the field values on SecretsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SecretsConfigDumpMultiError, or nil if none found. +func (m *SecretsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicActiveSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicWarmingSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err 
!= nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SecretsConfigDumpMultiError(errors) + } + + return nil +} + +// SecretsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by SecretsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type SecretsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDumpMultiError) AllErrors() []error { return m } + +// SecretsConfigDumpValidationError is the validation error returned by +// SecretsConfigDump.Validate if the designated constraints aren't met. +type SecretsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecretsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDumpValidationError) ErrorName() string { + return "SecretsConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDumpValidationError{} + +// Validate checks the field values on SecretsConfigDump_DynamicSecret with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump_DynamicSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_DynamicSecretMultiError, or nil if none found. 
+func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return SecretsConfigDump_DynamicSecretMultiError(errors) + } + + return nil +} + +// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_DynamicSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m SecretsConfigDump_DynamicSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m } + +// SecretsConfigDump_DynamicSecretValidationError is the validation error +// returned by SecretsConfigDump_DynamicSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_DynamicSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string { + return "SecretsConfigDump_DynamicSecretValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDump_DynamicSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDump_DynamicSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDump_DynamicSecretValidationError{} + +// Validate checks the field values on SecretsConfigDump_StaticSecret with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump_StaticSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_StaticSecretMultiError, or nil if none found. 
+func (m *SecretsConfigDump_StaticSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SecretsConfigDump_StaticSecretMultiError(errors) + } + + return nil +} + +// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_StaticSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretsConfigDump_StaticSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m } + +// SecretsConfigDump_StaticSecretValidationError is the validation error +// returned by SecretsConfigDump_StaticSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_StaticSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string { + return "SecretsConfigDump_StaticSecretValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDump_StaticSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDump_StaticSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDump_StaticSecretValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go new file mode 100644 index 000000000..feb0921ae --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go @@ -0,0 +1,2242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource status from the view of a xDS client, which tells the synchronization +// status between the xDS client and the xDS server. +type ClientResourceStatus int32 + +const ( + // Resource status is not available/unknown. + ClientResourceStatus_UNKNOWN ClientResourceStatus = 0 + // Client requested this resource but hasn't received any update from management + // server. The client will not fail requests, but will queue them until update + // arrives or the client times out waiting for the resource. + ClientResourceStatus_REQUESTED ClientResourceStatus = 1 + // This resource has been requested by the client but has either not been + // delivered by the server or was previously delivered by the server and then + // subsequently removed from resources provided by the server. For more + // information, please refer to the :ref:`"Knowing When a Requested Resource + // Does Not Exist" ` section. + ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2 + // Client received this resource and replied with ACK. + ClientResourceStatus_ACKED ClientResourceStatus = 3 + // Client received this resource and replied with NACK. + ClientResourceStatus_NACKED ClientResourceStatus = 4 +) + +// Enum value maps for ClientResourceStatus. 
+var ( + ClientResourceStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REQUESTED", + 2: "DOES_NOT_EXIST", + 3: "ACKED", + 4: "NACKED", + } + ClientResourceStatus_value = map[string]int32{ + "UNKNOWN": 0, + "REQUESTED": 1, + "DOES_NOT_EXIST": 2, + "ACKED": 3, + "NACKED": 4, + } +) + +func (x ClientResourceStatus) Enum() *ClientResourceStatus { + p := new(ClientResourceStatus) + *p = x + return p +} + +func (x ClientResourceStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0].Descriptor() +} + +func (ClientResourceStatus) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0] +} + +func (x ClientResourceStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientResourceStatus.Descriptor instead. +func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +type UpdateFailureState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // What the component configuration would have been if the update had succeeded. + // This field may not be populated by xDS clients due to storage overhead. + FailedConfiguration *anypb.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` + // Time of the latest failed update attempt. + LastUpdateAttempt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` + // Details about the last failed update attempt. + Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` + // This is the version of the rejected resource. + // [#not-implemented-hide:] + VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` +} + +func (x *UpdateFailureState) Reset() { + *x = UpdateFailureState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateFailureState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateFailureState) ProtoMessage() {} + +func (x *UpdateFailureState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead. 
+func (*UpdateFailureState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateFailureState) GetFailedConfiguration() *anypb.Any { + if x != nil { + return x.FailedConfiguration + } + return nil +} + +func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateAttempt + } + return nil +} + +func (x *UpdateFailureState) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +func (x *UpdateFailureState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. +type ListenersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded listener configs. + StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"` + // State for any warming, active, or draining listeners. + DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"` +} + +func (x *ListenersConfigDump) Reset() { + *x = ListenersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump) ProtoMessage() {} + +func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener { + if x != nil { + return x.StaticListeners + } + return nil +} + +func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener { + if x != nil { + return x.DynamicListeners + } + return nil +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. 
+type ClustersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded cluster configs. + StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"` + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"` + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"` +} + +func (x *ClustersConfigDump) Reset() { + *x = ClustersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump) ProtoMessage() {} + +func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2} +} + +func (x *ClustersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster { + if x != nil { + return x.StaticClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicActiveClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicWarmingClusters + } + return nil +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration +// or defined inline while configuring listeners are separated from those configured dynamically via RDS. +// Route configuration information can be used to recreate an Envoy configuration by populating all routes +// as static routes or by returning them in RDS responses. 
+type RoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded route configs. + StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"` + // The dynamically loaded route configs. + DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"` +} + +func (x *RoutesConfigDump) Reset() { + *x = RoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump) ProtoMessage() {} + +func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead. +func (*RoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3} +} + +func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig { + if x != nil { + return x.StaticRouteConfigs + } + return nil +} + +func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig { + if x != nil { + return x.DynamicRouteConfigs + } + return nil +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +type ScopedRoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded scoped route configs. + InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"` + // The dynamically loaded scoped route configs. 
+ DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"` +} + +func (x *ScopedRoutesConfigDump) Reset() { + *x = ScopedRoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead. +func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4} +} + +func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs { + if x != nil { + return x.InlineScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs { + if x != nil { + return x.DynamicScopedRouteConfigs + } + return nil +} + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +type EndpointsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded endpoint configs. + StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"` + // The dynamically loaded endpoint configs. + DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"` +} + +func (x *EndpointsConfigDump) Reset() { + *x = EndpointsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump) ProtoMessage() {} + +func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead. 
+func (*EndpointsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5} +} + +func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig { + if x != nil { + return x.StaticEndpointConfigs + } + return nil +} + +func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig { + if x != nil { + return x.DynamicEndpointConfigs + } + return nil +} + +// Envoy's ECDS service fills this message with all currently extension +// configuration. Extension configuration information can be used to recreate +// an Envoy ECDS listener and HTTP filters as static filters or by returning +// them in ECDS response. +type EcdsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ECDS filter configs. + EcdsFilters []*EcdsConfigDump_EcdsFilterConfig `protobuf:"bytes,1,rep,name=ecds_filters,json=ecdsFilters,proto3" json:"ecds_filters,omitempty"` +} + +func (x *EcdsConfigDump) Reset() { + *x = EcdsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump) ProtoMessage() {} + +func (x *EcdsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump.ProtoReflect.Descriptor instead. +func (*EcdsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6} +} + +func (x *EcdsConfigDump) GetEcdsFilters() []*EcdsConfigDump_EcdsFilterConfig { + if x != nil { + return x.EcdsFilters + } + return nil +} + +// Describes a statically loaded listener. +type ListenersConfigDump_StaticListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The listener config. + Listener *anypb.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_StaticListener) Reset() { + *x = ListenersConfigDump_StaticListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_StaticListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_StaticListener) ProtoMessage() {} + +func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *ListenersConfigDump_StaticListener) GetListener() *anypb.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +type ListenersConfigDump_DynamicListenerState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The listener config. + Listener *anypb.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListenerState) Reset() { + *x = ListenersConfigDump_DynamicListenerState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListenerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead. 
+func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump_DynamicListenerState) GetListener() *anypb.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded listener via the LDS API. +// [#next-free-field: 7] +type ListenersConfigDump_DynamicListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name or unique id of this listener, pulled from the DynamicListenerState config. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The listener state for any active listener by this name. + // These are listeners that are available to service data plane traffic. + ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` + // The listener state for any warming listener by this name. + // These are listeners that are currently undergoing warming in preparation to service data + // plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the warming listeners should generally be discarded. + WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"` + // The listener state for any draining listener by this name. + // These are listeners that are currently undergoing draining in preparation to stop servicing + // data plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the draining listeners should generally be discarded. + DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. 
+ // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListener) Reset() { + *x = ListenersConfigDump_DynamicListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListener) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *ListenersConfigDump_DynamicListener) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.ActiveState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.WarmingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.DrainingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// Describes a statically loaded cluster. +type ClustersConfigDump_StaticCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster config. + Cluster *anypb.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ClustersConfigDump_StaticCluster) Reset() { + *x = ClustersConfigDump_StaticCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_StaticCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_StaticCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClustersConfigDump_StaticCluster) GetCluster() *anypb.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded cluster via the CDS API. +// [#next-free-field: 6] +type ClustersConfigDump_DynamicCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The cluster config. + Cluster *anypb.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. 
+ // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ClustersConfigDump_DynamicCluster) Reset() { + *x = ClustersConfigDump_DynamicCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_DynamicCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump_DynamicCluster) GetCluster() *anypb.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type RoutesConfigDump_StaticRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The route config. + RouteConfig *anypb.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *RoutesConfigDump_StaticRouteConfig) Reset() { + *x = RoutesConfigDump_StaticRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_StaticRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead. 
+func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *anypb.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type RoutesConfigDump_DynamicRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The route config. + RouteConfig *anypb.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *RoutesConfigDump_DynamicRouteConfig) Reset() { + *x = RoutesConfigDump_DynamicRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_DynamicRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead. 
+func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *anypb.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead. +func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 7] +type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type EndpointsConfigDump_StaticEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The endpoint config. + EndpointConfig *anypb.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() { + *x = EndpointsConfigDump_StaticEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *anypb.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type EndpointsConfigDump_DynamicEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. 
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The endpoint config. + EndpointConfig *anypb.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() { + *x = EndpointsConfigDump_DynamicEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 1} +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *anypb.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// [#next-free-field: 6] +type EcdsConfigDump_EcdsFilterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently + // taken from the :ref:`version_info + // ` + // field at the time that the ECDS filter was loaded. 
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The ECDS filter config. + EcdsFilter *anypb.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"` + // The timestamp when the ECDS filter was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this + // particular resource along with the reason and timestamp. For successfully + // updated or acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EcdsConfigDump_EcdsFilterConfig) Reset() { + *x = EcdsConfigDump_EcdsFilterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump_EcdsFilterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump_EcdsFilterConfig) ProtoMessage() {} + +func (x *EcdsConfigDump_EcdsFilterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump_EcdsFilterConfig.ProtoReflect.Descriptor instead. 
+func (*EcdsConfigDump_EcdsFilterConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *anypb.Any { + if x != nil { + return x.EcdsFilter + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +var File_envoy_admin_v3_config_dump_shared_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_shared_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, + 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 
0x6e, 0x66, 0x6f, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, + 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, 0x0a, 0x10, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x30, + 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, + 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x1a, 0xef, + 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, + 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x43, 0x9a, 0xc5, 0x88, + 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x15, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 
0x6e, 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, 0x74, 
0x61, 0x74, 0x69, + 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a, + 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0c, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x0a, + 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 
0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x7e, 0x0a, + 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x81, 0x01, + 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, + 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 
0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x31, + 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x22, 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x1a, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 
0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x89, 0x04, 0x0a, 0x0e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x52, 0x0a, 0x0c, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x63, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xf7, 0x02, 0x0a, 0x10, 0x45, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x35, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2a, 0x5d, + 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, + 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, + 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x42, 0x7e, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = file_envoy_admin_v3_config_dump_shared_proto_rawDesc +) + +func file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_shared_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_shared_proto_rawDescData +} + +var file_envoy_admin_v3_config_dump_shared_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var 
file_envoy_admin_v3_config_dump_shared_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{ + (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus + (*UpdateFailureState)(nil), // 1: envoy.admin.v3.UpdateFailureState + (*ListenersConfigDump)(nil), // 2: envoy.admin.v3.ListenersConfigDump + (*ClustersConfigDump)(nil), // 3: envoy.admin.v3.ClustersConfigDump + (*RoutesConfigDump)(nil), // 4: envoy.admin.v3.RoutesConfigDump + (*ScopedRoutesConfigDump)(nil), // 5: envoy.admin.v3.ScopedRoutesConfigDump + (*EndpointsConfigDump)(nil), // 6: envoy.admin.v3.EndpointsConfigDump + (*EcdsConfigDump)(nil), // 7: envoy.admin.v3.EcdsConfigDump + (*ListenersConfigDump_StaticListener)(nil), // 8: envoy.admin.v3.ListenersConfigDump.StaticListener + (*ListenersConfigDump_DynamicListenerState)(nil), // 9: envoy.admin.v3.ListenersConfigDump.DynamicListenerState + (*ListenersConfigDump_DynamicListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.DynamicListener + (*ClustersConfigDump_StaticCluster)(nil), // 11: envoy.admin.v3.ClustersConfigDump.StaticCluster + (*ClustersConfigDump_DynamicCluster)(nil), // 12: envoy.admin.v3.ClustersConfigDump.DynamicCluster + (*RoutesConfigDump_StaticRouteConfig)(nil), // 13: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + (*RoutesConfigDump_DynamicRouteConfig)(nil), // 14: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 15: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 16: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + (*anypb.Any)(nil), // 20: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp +} +var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{ + 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any + 21, // 1: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp + 8, // 2: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener + 10, // 3: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener + 11, // 4: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster + 12, // 5: envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 12, // 6: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 13, // 7: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + 14, // 8: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + 15, // 9: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + 16, // 10: 
envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + 17, // 11: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + 18, // 12: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + 19, // 13: envoy.admin.v3.EcdsConfigDump.ecds_filters:type_name -> envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + 20, // 14: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any + 21, // 15: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp + 20, // 16: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any + 21, // 17: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp + 9, // 18: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 19: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 20: envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 1, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 23: envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any + 21, // 24: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp + 20, // 25: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any + 21, // 26: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp + 1, // 27: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 28: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 29: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 30: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 31: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 32: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 33: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 34: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 35: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 36: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp + 20, // 37: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 38: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> 
google.protobuf.Timestamp + 1, // 39: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 40: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 41: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 42: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 43: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 44: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 45: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 46: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 47: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.ecds_filter:type_name -> google.protobuf.Any + 21, // 48: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 49: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 50: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 51, // [51:51] is the sub-list for method output_type + 51, // [51:51] is the sub-list for method input_type + 51, // [51:51] is the sub-list for extension type_name + 51, // [51:51] is the sub-list for extension extendee + 0, // [0:51] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_config_dump_shared_proto_init() } +func file_envoy_admin_v3_config_dump_shared_proto_init() { + if File_envoy_admin_v3_config_dump_shared_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateFailureState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*EndpointsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_StaticListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListenerState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_StaticCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_DynamicCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_StaticRouteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump_EcdsFilterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_config_dump_shared_proto_rawDesc, + NumEnums: 1, + NumMessages: 19, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_config_dump_shared_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_config_dump_shared_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_config_dump_shared_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_config_dump_shared_proto_msgTypes, + }.Build() + File_envoy_admin_v3_config_dump_shared_proto = out.File + file_envoy_admin_v3_config_dump_shared_proto_rawDesc = nil + file_envoy_admin_v3_config_dump_shared_proto_goTypes = nil + file_envoy_admin_v3_config_dump_shared_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go new file mode 100644 index 000000000..dd16990ad --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go @@ -0,0 +1,3435 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateFailureState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateFailureStateMultiError, or nil if none found. 
+func (m *UpdateFailureState) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateFailureState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFailedConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdateAttempt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Details + + // no validation rules for VersionInfo + + if len(errors) > 0 { + return UpdateFailureStateMultiError(errors) + } + + return nil +} + +// UpdateFailureStateMultiError is an error wrapping multiple validation errors +// returned by UpdateFailureState.ValidateAll() if the designated constraints +// aren't met. +type UpdateFailureStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateFailureStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateFailureStateMultiError) AllErrors() []error { return m } + +// UpdateFailureStateValidationError is the validation error returned by +// UpdateFailureState.Validate if the designated constraints aren't met. +type UpdateFailureStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateFailureStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateFailureStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateFailureStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateFailureStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpdateFailureStateValidationError) ErrorName() string { + return "UpdateFailureStateValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateFailureStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateFailureState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateFailureStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateFailureStateValidationError{} + +// Validate checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListenersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenersConfigDumpMultiError, or nil if none found. +func (m *ListenersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenersConfigDumpMultiError(errors) + } + + return nil +} + +// ListenersConfigDumpMultiError is an error wrapping 
multiple validation +// errors returned by ListenersConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDumpMultiError) AllErrors() []error { return m } + +// ListenersConfigDumpValidationError is the validation error returned by +// ListenersConfigDump.Validate if the designated constraints aren't met. +type ListenersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDumpValidationError) ErrorName() string { + return "ListenersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDumpValidationError{} + +// Validate checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClustersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClustersConfigDumpMultiError, or nil if none found. 
+func (m *ClustersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicActiveClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicWarmingClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClustersConfigDumpMultiError(errors) + } + + return nil +} + +// ClustersConfigDumpMultiError is an error wrapping multiple validation errors +// returned by ClustersConfigDump.ValidateAll() if the designated constraints +// aren't met. +type ClustersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClustersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDumpMultiError) AllErrors() []error { return m } + +// ClustersConfigDumpValidationError is the validation error returned by +// ClustersConfigDump.Validate if the designated constraints aren't met. +type ClustersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDumpValidationError) ErrorName() string { + return "ClustersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDumpValidationError{} + +// Validate checks the field values on RoutesConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RoutesConfigDumpMultiError, or nil if none found. 
+func (m *RoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RoutesConfigDumpMultiError(errors) + } + + return nil +} + +// RoutesConfigDumpMultiError is an error wrapping multiple validation errors +// returned by RoutesConfigDump.ValidateAll() if the designated constraints +// aren't met. +type RoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDumpMultiError) AllErrors() []error { return m } + +// RoutesConfigDumpValidationError is the validation error returned by +// RoutesConfigDump.Validate if the designated constraints aren't met. +type RoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e RoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDumpValidationError{} + +// Validate checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutesConfigDumpMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetInlineScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRoutesConfigDumpMultiError(errors) + } + + return nil +} + 
+// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation +// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ScopedRoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDumpValidationError is the validation error returned by +// ScopedRoutesConfigDump.Validate if the designated constraints aren't met. +type ScopedRoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDumpValidationError) ErrorName() string { + return "ScopedRoutesConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDumpValidationError{} + +// Validate checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EndpointsConfigDumpMultiError, or nil if none found. 
+func (m *EndpointsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EndpointsConfigDumpMultiError(errors) + } + + return nil +} + +// EndpointsConfigDumpMultiError is an error wrapping multiple validation +// errors returned by EndpointsConfigDump.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m } + +// EndpointsConfigDumpValidationError is the validation error returned by +// EndpointsConfigDump.Validate if the designated constraints aren't met. +type EndpointsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e EndpointsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointsConfigDumpValidationError) ErrorName() string { + return "EndpointsConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDumpValidationError{} + +// Validate checks the field values on EcdsConfigDump with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EcdsConfigDumpMultiError, +// or nil if none found. +func (m *EcdsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetEcdsFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EcdsConfigDumpMultiError(errors) + } + + return nil +} + +// EcdsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by EcdsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type EcdsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EcdsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDumpMultiError) AllErrors() []error { return m } + +// EcdsConfigDumpValidationError is the validation error returned by +// EcdsConfigDump.Validate if the designated constraints aren't met. +type EcdsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e EcdsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EcdsConfigDumpValidationError) ErrorName() string { return "EcdsConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e EcdsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDumpValidationError{} + +// Validate checks the field values on ListenersConfigDump_StaticListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_StaticListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_StaticListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_StaticListenerMultiError, or nil if none found. +func (m *ListenersConfigDump_StaticListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_StaticListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + 
cause: err, + } + } + } + + if len(errors) > 0 { + return ListenersConfigDump_StaticListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple +// validation errors returned by +// ListenersConfigDump_StaticListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_StaticListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_StaticListenerValidationError is the validation error +// returned by ListenersConfigDump_StaticListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string { + return "ListenersConfigDump_StaticListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_StaticListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_StaticListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_StaticListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_StaticListenerValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListenerState +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ListenersConfigDump_DynamicListenerState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ListenersConfigDump_DynamicListenerState with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found. 
+func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerStateMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping +// multiple validation errors returned by +// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerStateValidationError is the validation +// error returned by ListenersConfigDump_DynamicListenerState.Validate if the +// designated constraints aren't met. +type ListenersConfigDump_DynamicListenerStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerStateValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerStateValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_DynamicListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_DynamicListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerMultiError, or nil if none found. +func (m *ListenersConfigDump_DynamicListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetActiveState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWarmingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDrainingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple +// validation errors returned by +// ListenersConfigDump_DynamicListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerValidationError is the validation error +// returned by ListenersConfigDump_DynamicListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerValidationError{} + +// Validate checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_StaticCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_StaticClusterMultiError, or nil if none found. +func (m *ClustersConfigDump_StaticCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_StaticCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClustersConfigDump_StaticClusterMultiError(errors) 
+ } + + return nil +} + +// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_StaticCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersConfigDump_StaticClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_StaticClusterValidationError is the validation error +// returned by ClustersConfigDump_StaticCluster.Validate if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string { + return "ClustersConfigDump_StaticClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_StaticClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_StaticCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_StaticClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_StaticClusterValidationError{} + +// Validate checks the field values on ClustersConfigDump_DynamicCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_DynamicCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_DynamicClusterMultiError, or nil if none found. 
+func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ClustersConfigDump_DynamicClusterMultiError(errors) + } + + return nil +} + +// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_DynamicClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClustersConfigDump_DynamicClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_DynamicClusterValidationError is the validation error +// returned by ClustersConfigDump_DynamicCluster.Validate if the designated +// constraints aren't met. +type ClustersConfigDump_DynamicClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string { + return "ClustersConfigDump_DynamicClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_DynamicClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_DynamicClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_DynamicClusterValidationError{} + +// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_StaticRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found. 
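The generated API above always follows one pattern: Validate reports only the first violation, while ValidateAll returns the corresponding MultiError whose AllErrors slice carries every violation. A minimal consumer sketch in Go (illustrative only; the adminv3 import alias and the logViolations helper are assumptions, not part of this vendored code):

package main

import (
	"log"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

// logViolations reports every rule violation found on one static
// route-config entry of an Envoy config dump.
func logViolations(dump *adminv3.RoutesConfigDump_StaticRouteConfig) {
	err := dump.ValidateAll()
	if err == nil {
		return
	}
	// ValidateAll wraps all violations in the generated MultiError slice;
	// Validate would have returned only the first of them.
	if multi, ok := err.(adminv3.RoutesConfigDump_StaticRouteConfigMultiError); ok {
		for _, verr := range multi.AllErrors() {
			log.Printf("config_dump validation: %v", verr)
		}
		return
	}
	log.Printf("config_dump validation: %v", err)
}

func main() {
	logViolations(&adminv3.RoutesConfigDump_StaticRouteConfig{})
}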
+func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RoutesConfigDump_StaticRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_StaticRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_StaticRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_StaticRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_StaticRouteConfigValidationError{} + +// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found. +func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + 
} + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return RoutesConfigDump_DynamicRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_DynamicRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated +// constraints aren't met. +type RoutesConfigDump_DynamicRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_DynamicRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_DynamicRouteConfigValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found. 
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for VersionInfo + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if 
the +// designated constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found. 
+func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EndpointsConfigDump_StaticEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_StaticEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +// Validate checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found. +func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok 
:= interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_DynamicEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +// Validate checks the field values on EcdsConfigDump_EcdsFilterConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump_EcdsFilterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump_EcdsFilterConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// EcdsConfigDump_EcdsFilterConfigMultiError, or nil if none found. +func (m *EcdsConfigDump_EcdsFilterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEcdsFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEcdsFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EcdsConfigDump_EcdsFilterConfigMultiError(errors) + } + + return nil +} + +// EcdsConfigDump_EcdsFilterConfigMultiError is an error wrapping multiple +// validation errors returned by EcdsConfigDump_EcdsFilterConfig.ValidateAll() +// if the designated constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EcdsConfigDump_EcdsFilterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDump_EcdsFilterConfigMultiError) AllErrors() []error { return m } + +// EcdsConfigDump_EcdsFilterConfigValidationError is the validation error +// returned by EcdsConfigDump_EcdsFilterConfig.Validate if the designated +// constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
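Each concrete ValidationError value, such as EcdsConfigDump_EcdsFilterConfigValidationError below, exposes the failing field, the reason, the wrapped cause, and whether the value was a map key — the same pieces its Error() string is assembled from. A short sketch of unpacking one (illustrative only; describe is a hypothetical helper, not part of this vendored code):

package main

import (
	"errors"
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

// describe unpacks a single violation reported for an
// EcdsConfigDump_EcdsFilterConfig message.
func describe(err error) string {
	var verr adminv3.EcdsConfigDump_EcdsFilterConfigValidationError
	if !errors.As(err, &verr) {
		return err.Error()
	}
	return fmt.Sprintf("field=%s reason=%s key=%t cause=%v",
		verr.Field(), verr.Reason(), verr.Key(), verr.Cause())
}

func main() {
	// An empty message carries no violations here; a real config dump with a
	// malformed embedded filter config would surface one.
	if err := (&adminv3.EcdsConfigDump_EcdsFilterConfig{}).Validate(); err != nil {
		fmt.Println(describe(err))
	}
}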
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) ErrorName() string { + return "EcdsConfigDump_EcdsFilterConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump_EcdsFilterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDump_EcdsFilterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDump_EcdsFilterConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go new file mode 100644 index 000000000..934de8568 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go @@ -0,0 +1,1715 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpdateFailureState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateFailureState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpdateFailureState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Details) > 0 { + i -= len(m.Details) + copy(dAtA[i:], m.Details) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Details))) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdateAttempt != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdateAttempt).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FailedConfiguration != nil { + size, err := (*anypb.Any)(m.FailedConfiguration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_StaticListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == 
nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DrainingState != nil { + size, err := m.DrainingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.WarmingState != nil { + size, err := m.WarmingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveState != nil { + size, err := m.ActiveState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if len(m.DynamicListeners) > 0 { + for iNdEx := len(m.DynamicListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticListeners) > 0 { + for iNdEx := len(m.StaticListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Cluster != nil { + size, err := 
(*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingClusters) > 0 { + for iNdEx := len(m.DynamicWarmingClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.DynamicActiveClusters) > 0 { + for iNdEx := len(m.DynamicActiveClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticClusters) > 0 { + for iNdEx := len(m.StaticClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa 
+ } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicRouteConfigs) > 0 { + for iNdEx := len(m.DynamicRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticRouteConfigs) > 0 { + for iNdEx := len(m.StaticRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for iNdEx := len(m.DynamicScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.InlineScopedRouteConfigs) > 0 { + for iNdEx := len(m.InlineScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicEndpointConfigs) > 0 { + for iNdEx := len(m.DynamicEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticEndpointConfigs) > 0 { + for iNdEx := len(m.StaticEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EcdsFilter != nil { + size, err := (*anypb.Any)(m.EcdsFilter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EcdsFilters) > 0 { + for iNdEx := len(m.EcdsFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EcdsFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpdateFailureState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedConfiguration != nil { + l = (*anypb.Any)(m.FailedConfiguration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdateAttempt != nil { + l = (*timestamppb.Timestamp)(m.LastUpdateAttempt).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Details) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_StaticListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListenerState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveState != nil { + l = m.ActiveState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WarmingState != nil { + l = m.WarmingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainingState != nil { + l = 
m.DrainingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticListeners) > 0 { + for _, e := range m.StaticListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicListeners) > 0 { + for _, e := range m.DynamicListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_StaticCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_DynamicCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticClusters) > 0 { + for _, e := range m.StaticClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveClusters) > 0 { + for _, e := range m.DynamicActiveClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingClusters) > 0 { + for _, e := range m.DynamicWarmingClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_StaticRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_DynamicRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = 
(*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticRouteConfigs) > 0 { + for _, e := range m.StaticRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicRouteConfigs) > 0 { + for _, e := range m.DynamicRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.InlineScopedRouteConfigs) > 0 { + for _, e := range m.InlineScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for _, e := range m.DynamicScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + 
l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticEndpointConfigs) > 0 { + for _, e := range m.StaticEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicEndpointConfigs) > 0 { + for _, e := range m.DynamicEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump_EcdsFilterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EcdsFilter != nil { + l = (*anypb.Any)(m.EcdsFilter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.EcdsFilters) > 0 { + for _, e := range m.EcdsFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go new file mode 100644 index 000000000..78e37eec9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go @@ -0,0 +1,466 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Configs) > 0 { + for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Configs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BootstrapConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BootstrapConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BootstrapConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Bootstrap != nil { + if vtmsg, ok := interface{}(m.Bootstrap).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Bootstrap) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := 
m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingSecrets) > 0 { + for iNdEx := len(m.DynamicWarmingSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DynamicActiveSecrets) > 0 { + for iNdEx := 
len(m.DynamicActiveSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.StaticSecrets) > 0 { + for iNdEx := len(m.StaticSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BootstrapConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Bootstrap != nil { + if size, ok := interface{}(m.Bootstrap).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Bootstrap) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_DynamicSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_StaticSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticSecrets) > 0 { + for _, e := range m.StaticSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveSecrets) > 0 { + for _, e := range m.DynamicActiveSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingSecrets) > 0 { + for _, e := range m.DynamicWarmingSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go new file mode 100644 index 000000000..388c1de32 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, +// which provides the information of their unready targets. +// The :ref:`/init_dump ` will dump all unready targets information. +type UnreadyTargetsDumps struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // You can choose specific component to dump unready targets with mask query parameter. + // See :ref:`/init_dump?mask={} ` for more information. + // The dumps of unready targets of all init managers. + UnreadyTargetsDumps []*UnreadyTargetsDumps_UnreadyTargetsDump `protobuf:"bytes,1,rep,name=unready_targets_dumps,json=unreadyTargetsDumps,proto3" json:"unready_targets_dumps,omitempty"` +} + +func (x *UnreadyTargetsDumps) Reset() { + *x = UnreadyTargetsDumps{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnreadyTargetsDumps) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnreadyTargetsDumps) ProtoMessage() {} + +func (x *UnreadyTargetsDumps) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnreadyTargetsDumps.ProtoReflect.Descriptor instead. +func (*UnreadyTargetsDumps) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0} +} + +func (x *UnreadyTargetsDumps) GetUnreadyTargetsDumps() []*UnreadyTargetsDumps_UnreadyTargetsDump { + if x != nil { + return x.UnreadyTargetsDumps + } + return nil +} + +// Message of unready targets information of an init manager. +type UnreadyTargetsDumps_UnreadyTargetsDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the init manager. Example: "init_manager_xxx". + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Names of unready targets of the init manager. Example: "target_xxx". 
+ TargetNames []string `protobuf:"bytes,2,rep,name=target_names,json=targetNames,proto3" json:"target_names,omitempty"` +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) Reset() { + *x = UnreadyTargetsDumps_UnreadyTargetsDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnreadyTargetsDumps_UnreadyTargetsDump) ProtoMessage() {} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnreadyTargetsDumps_UnreadyTargetsDump.ProtoReflect.Descriptor instead. +func (*UnreadyTargetsDumps_UnreadyTargetsDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetTargetNames() []string { + if x != nil { + return x.TargetNames + } + return nil +} + +var File_envoy_admin_v3_init_dump_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_init_dump_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xce, 0x01, 0x0a, 0x13, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x75, 0x6e, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x2e, 0x55, 0x6e, 0x72, 0x65, + 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x52, 0x13, + 0x75, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, + 0x6d, 0x70, 0x73, 0x1a, 0x4b, 0x0a, 0x12, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 
0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x49, 0x6e, 0x69, 0x74, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_init_dump_proto_rawDescOnce sync.Once + file_envoy_admin_v3_init_dump_proto_rawDescData = file_envoy_admin_v3_init_dump_proto_rawDesc +) + +func file_envoy_admin_v3_init_dump_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_init_dump_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_init_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_init_dump_proto_rawDescData) + }) + return file_envoy_admin_v3_init_dump_proto_rawDescData +} + +var file_envoy_admin_v3_init_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_init_dump_proto_goTypes = []interface{}{ + (*UnreadyTargetsDumps)(nil), // 0: envoy.admin.v3.UnreadyTargetsDumps + (*UnreadyTargetsDumps_UnreadyTargetsDump)(nil), // 1: envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump +} +var file_envoy_admin_v3_init_dump_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.UnreadyTargetsDumps.unready_targets_dumps:type_name -> envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_init_dump_proto_init() } +func file_envoy_admin_v3_init_dump_proto_init() { + if File_envoy_admin_v3_init_dump_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_init_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnreadyTargetsDumps); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_init_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnreadyTargetsDumps_UnreadyTargetsDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_init_dump_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_init_dump_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_init_dump_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_init_dump_proto_msgTypes, + }.Build() + File_envoy_admin_v3_init_dump_proto = out.File + file_envoy_admin_v3_init_dump_proto_rawDesc = nil + file_envoy_admin_v3_init_dump_proto_goTypes = nil + file_envoy_admin_v3_init_dump_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go new file mode 100644 index 
000000000..f746a1264 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go @@ -0,0 +1,281 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UnreadyTargetsDumps with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UnreadyTargetsDumps) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UnreadyTargetsDumps with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UnreadyTargetsDumpsMultiError, or nil if none found. +func (m *UnreadyTargetsDumps) ValidateAll() error { + return m.validate(true) +} + +func (m *UnreadyTargetsDumps) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetUnreadyTargetsDumps() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return UnreadyTargetsDumpsMultiError(errors) + } + + return nil +} + +// UnreadyTargetsDumpsMultiError is an error wrapping multiple validation +// errors returned by UnreadyTargetsDumps.ValidateAll() if the designated +// constraints aren't met. +type UnreadyTargetsDumpsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnreadyTargetsDumpsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnreadyTargetsDumpsMultiError) AllErrors() []error { return m } + +// UnreadyTargetsDumpsValidationError is the validation error returned by +// UnreadyTargetsDumps.Validate if the designated constraints aren't met. +type UnreadyTargetsDumpsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UnreadyTargetsDumpsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UnreadyTargetsDumpsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnreadyTargetsDumpsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnreadyTargetsDumpsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnreadyTargetsDumpsValidationError) ErrorName() string { + return "UnreadyTargetsDumpsValidationError" +} + +// Error satisfies the builtin error interface +func (e UnreadyTargetsDumpsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnreadyTargetsDumps.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnreadyTargetsDumpsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnreadyTargetsDumpsValidationError{} + +// Validate checks the field values on UnreadyTargetsDumps_UnreadyTargetsDump +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// UnreadyTargetsDumps_UnreadyTargetsDump with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError, or nil if none found. +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) ValidateAll() error { + return m.validate(true) +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if len(errors) > 0 { + return UnreadyTargetsDumps_UnreadyTargetsDumpMultiError(errors) + } + + return nil +} + +// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError is an error wrapping +// multiple validation errors returned by +// UnreadyTargetsDumps_UnreadyTargetsDump.ValidateAll() if the designated +// constraints aren't met. +type UnreadyTargetsDumps_UnreadyTargetsDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) AllErrors() []error { return m } + +// UnreadyTargetsDumps_UnreadyTargetsDumpValidationError is the validation +// error returned by UnreadyTargetsDumps_UnreadyTargetsDump.Validate if the +// designated constraints aren't met. +type UnreadyTargetsDumps_UnreadyTargetsDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
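For illustration, a minimal sketch of how downstream code might call the generated Validate/ValidateAll pair on these messages, assuming the vendored adminv3 import path shown above. The field values are made up, and since UnreadyTargetsDumps carries no validation rules, ValidateAll returns nil here; the error branch only demonstrates the general MultiError pattern.

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

func main() {
	dump := &adminv3.UnreadyTargetsDumps{
		UnreadyTargetsDumps: []*adminv3.UnreadyTargetsDumps_UnreadyTargetsDump{
			{Name: "init_manager_xxx", TargetNames: []string{"target_xxx"}},
		},
	}

	// ValidateAll collects every violation; Validate stops at the first one.
	// The concrete error type is the generated UnreadyTargetsDumpsMultiError,
	// whose AllErrors method exposes the individual violations.
	if err := dump.ValidateAll(); err != nil {
		if multi, ok := err.(adminv3.UnreadyTargetsDumpsMultiError); ok {
			for _, violation := range multi.AllErrors() {
				fmt.Println("violation:", violation)
			}
		}
	}
}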
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) ErrorName() string { + return "UnreadyTargetsDumps_UnreadyTargetsDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnreadyTargetsDumps_UnreadyTargetsDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go new file mode 100644 index 000000000..d957042b8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TargetNames) > 0 { + for iNdEx := len(m.TargetNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetNames[iNdEx]) + copy(dAtA[i:], m.TargetNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.UnreadyTargetsDumps) > 0 { + for iNdEx := len(m.UnreadyTargetsDumps) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UnreadyTargetsDumps[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TargetNames) > 0 { + for _, s := range m.TargetNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UnreadyTargetsDumps) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UnreadyTargetsDumps) > 0 { + for _, e := range m.UnreadyTargetsDumps { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go new file mode 100644 index 000000000..ac6015fac --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
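The vtproto files above are compiled only under the vtprotobuf build tag. A small sketch of how that reflection-free marshalling might be used, assuming the same vendored import path and a build with -tags vtprotobuf; the message contents are illustrative only.

//go:build vtprotobuf

package main

import (
	"fmt"
	"log"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

func main() {
	dump := &adminv3.UnreadyTargetsDumps_UnreadyTargetsDump{
		Name:        "init_manager_xxx",
		TargetNames: []string{"target_xxx"},
	}

	// MarshalVTStrict sizes the buffer with SizeVT and encodes the message
	// without going through protobuf reflection; it exists only when the
	// vtprotobuf tag is set, matching the //go:build constraint above.
	raw, err := dump.MarshalVTStrict()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes\n", len(raw))
}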
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Admin endpoint uses this wrapper for “/listeners“ to display listener status information. +// See :ref:`/listeners ` for more information. +type Listeners struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of listener statuses. + ListenerStatuses []*ListenerStatus `protobuf:"bytes,1,rep,name=listener_statuses,json=listenerStatuses,proto3" json:"listener_statuses,omitempty"` +} + +func (x *Listeners) Reset() { + *x = Listeners{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listeners) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listeners) ProtoMessage() {} + +func (x *Listeners) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listeners.ProtoReflect.Descriptor instead. +func (*Listeners) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{0} +} + +func (x *Listeners) GetListenerStatuses() []*ListenerStatus { + if x != nil { + return x.ListenerStatuses + } + return nil +} + +// Details an individual listener's current status. +type ListenerStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the listener + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The actual local address that the listener is listening on. If a listener was configured + // to listen on port 0, then this address has the port that was allocated by the OS. + LocalAddress *v3.Address `protobuf:"bytes,2,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"` + // The additional addresses the listener is listening on as specified via the :ref:`additional_addresses ` + // configuration. 
+ AdditionalLocalAddresses []*v3.Address `protobuf:"bytes,3,rep,name=additional_local_addresses,json=additionalLocalAddresses,proto3" json:"additional_local_addresses,omitempty"` +} + +func (x *ListenerStatus) Reset() { + *x = ListenerStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerStatus) ProtoMessage() {} + +func (x *ListenerStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerStatus.ProtoReflect.Descriptor instead. +func (*ListenerStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenerStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListenerStatus) GetLocalAddress() *v3.Address { + if x != nil { + return x.LocalAddress + } + return nil +} + +func (x *ListenerStatus) GetAdditionalLocalAddresses() []*v3.Address { + if x != nil { + return x.AdditionalLocalAddresses + } + return nil +} + +var File_envoy_admin_v3_listeners_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_listeners_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x11, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, + 0x0d, 0x6c, 
0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x5b, 0x0a, 0x1a, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x77, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_listeners_proto_rawDescOnce sync.Once + file_envoy_admin_v3_listeners_proto_rawDescData = file_envoy_admin_v3_listeners_proto_rawDesc +) + +func file_envoy_admin_v3_listeners_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_listeners_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_listeners_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_listeners_proto_rawDescData) + }) + return file_envoy_admin_v3_listeners_proto_rawDescData +} + +var file_envoy_admin_v3_listeners_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_listeners_proto_goTypes = []interface{}{ + (*Listeners)(nil), // 0: envoy.admin.v3.Listeners + (*ListenerStatus)(nil), // 1: envoy.admin.v3.ListenerStatus + (*v3.Address)(nil), // 2: envoy.config.core.v3.Address +} +var file_envoy_admin_v3_listeners_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Listeners.listener_statuses:type_name -> envoy.admin.v3.ListenerStatus + 2, // 1: envoy.admin.v3.ListenerStatus.local_address:type_name -> envoy.config.core.v3.Address + 2, // 2: envoy.admin.v3.ListenerStatus.additional_local_addresses:type_name -> envoy.config.core.v3.Address + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_listeners_proto_init() } +func 
file_envoy_admin_v3_listeners_proto_init() { + if File_envoy_admin_v3_listeners_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_listeners_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listeners); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_listeners_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_listeners_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_listeners_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_listeners_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_listeners_proto_msgTypes, + }.Build() + File_envoy_admin_v3_listeners_proto = out.File + file_envoy_admin_v3_listeners_proto_rawDesc = nil + file_envoy_admin_v3_listeners_proto_goTypes = nil + file_envoy_admin_v3_listeners_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go new file mode 100644 index 000000000..02cce2639 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go @@ -0,0 +1,335 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Listeners with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Listeners) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listeners with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenersMultiError, or nil +// if none found. 
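As a usage sketch for the Listeners and ListenerStatus messages generated above: the snapshot below is hand-built to stand in for one decoded from Envoy's admin /listeners endpoint, and the listener name and address are invented for the example.

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
	v3core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	listeners := &adminv3.Listeners{
		ListenerStatuses: []*adminv3.ListenerStatus{
			{
				Name: "ingress_http",
				LocalAddress: &v3core.Address{
					Address: &v3core.Address_SocketAddress{
						SocketAddress: &v3core.SocketAddress{
							Address:       "0.0.0.0",
							PortSpecifier: &v3core.SocketAddress_PortValue{PortValue: 8080},
						},
					},
				},
			},
		},
	}

	// Walk each listener status and print the resolved local socket address.
	for _, status := range listeners.GetListenerStatuses() {
		sock := status.GetLocalAddress().GetSocketAddress()
		fmt.Printf("listener %q -> %s:%d\n",
			status.GetName(), sock.GetAddress(), sock.GetPortValue())
	}
}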
+func (m *Listeners) ValidateAll() error { + return m.validate(true) +} + +func (m *Listeners) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetListenerStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenersMultiError(errors) + } + + return nil +} + +// ListenersMultiError is an error wrapping multiple validation errors returned +// by Listeners.ValidateAll() if the designated constraints aren't met. +type ListenersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersMultiError) AllErrors() []error { return m } + +// ListenersValidationError is the validation error returned by +// Listeners.Validate if the designated constraints aren't met. +type ListenersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersValidationError) ErrorName() string { return "ListenersValidationError" } + +// Error satisfies the builtin error interface +func (e ListenersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListeners.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersValidationError{} + +// Validate checks the field values on ListenerStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerStatus with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in ListenerStatusMultiError, +// or nil if none found. +func (m *ListenerStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAdditionalLocalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerStatusMultiError(errors) + } + + return nil +} + +// ListenerStatusMultiError is an error wrapping multiple validation errors +// returned by ListenerStatus.ValidateAll() if the designated constraints +// aren't met. +type ListenerStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerStatusMultiError) AllErrors() []error { return m } + +// ListenerStatusValidationError is the validation error returned by +// ListenerStatus.Validate if the designated constraints aren't met. +type ListenerStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenerStatusValidationError) ErrorName() string { return "ListenerStatusValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerStatusValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go new file mode 100644 index 000000000..816437acf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go @@ -0,0 +1,203 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Listeners) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listeners) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listeners) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ListenerStatuses) > 0 { + for iNdEx := len(m.ListenerStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListenerStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalLocalAddresses) > 0 { + for iNdEx := len(m.AdditionalLocalAddresses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AdditionalLocalAddresses[iNdEx]).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalLocalAddresses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.LocalAddress != nil { + if vtmsg, ok := interface{}(m.LocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listeners) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ListenerStatuses) > 0 { + for _, e := range m.ListenerStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddress != nil { + if size, ok := interface{}(m.LocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalLocalAddresses) > 0 { + for _, e := range m.AdditionalLocalAddresses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go new file mode 100644 index 000000000..32de56ce3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go @@ -0,0 +1,228 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the internal memory consumption of an Envoy instance. These represent +// values extracted from an internal TCMalloc instance. For more information, see the section of the +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). 
+// [#next-free-field: 7] +type Memory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of bytes allocated by the heap for Envoy. This is an alias for + // “generic.current_allocated_bytes“. + Allocated uint64 `protobuf:"varint,1,opt,name=allocated,proto3" json:"allocated,omitempty"` + // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for + // “generic.heap_size“. + HeapSize uint64 `protobuf:"varint,2,opt,name=heap_size,json=heapSize,proto3" json:"heap_size,omitempty"` + // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards + // virtual memory usage, and depending on the OS, typically do not count towards physical memory + // usage. This is an alias for “tcmalloc.pageheap_unmapped_bytes“. + PageheapUnmapped uint64 `protobuf:"varint,3,opt,name=pageheap_unmapped,json=pageheapUnmapped,proto3" json:"pageheap_unmapped,omitempty"` + // The number of bytes in free, mapped pages in the page heap. These bytes always count towards + // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also + // count towards physical memory usage. This is an alias for “tcmalloc.pageheap_free_bytes“. + PageheapFree uint64 `protobuf:"varint,4,opt,name=pageheap_free,json=pageheapFree,proto3" json:"pageheap_free,omitempty"` + // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias + // for “tcmalloc.current_total_thread_cache_bytes“. + TotalThreadCache uint64 `protobuf:"varint,5,opt,name=total_thread_cache,json=totalThreadCache,proto3" json:"total_thread_cache,omitempty"` + // The number of bytes of the physical memory usage by the allocator. This is an alias for + // “generic.total_physical_bytes“. + TotalPhysicalBytes uint64 `protobuf:"varint,6,opt,name=total_physical_bytes,json=totalPhysicalBytes,proto3" json:"total_physical_bytes,omitempty"` +} + +func (x *Memory) Reset() { + *x = Memory{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_memory_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Memory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Memory) ProtoMessage() {} + +func (x *Memory) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_memory_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Memory.ProtoReflect.Descriptor instead. 
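A brief sketch of consuming the Memory message defined above, assuming a JSON payload in the shape Envoy's admin /memory endpoint returns; the byte counts are placeholders, and protojson is used because it accepts the proto field names and string-encoded uint64 values.

package main

import (
	"fmt"
	"log"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// Illustrative payload only; values are not taken from a real instance.
	payload := []byte(`{"allocated":"1500000","heap_size":"2000000","pageheap_free":"100000"}`)

	var mem adminv3.Memory
	if err := protojson.Unmarshal(payload, &mem); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("allocated=%d heap=%d pageheap_free=%d\n",
		mem.GetAllocated(), mem.GetHeapSize(), mem.GetPageheapFree())
}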
+func (*Memory) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_memory_proto_rawDescGZIP(), []int{0} +} + +func (x *Memory) GetAllocated() uint64 { + if x != nil { + return x.Allocated + } + return 0 +} + +func (x *Memory) GetHeapSize() uint64 { + if x != nil { + return x.HeapSize + } + return 0 +} + +func (x *Memory) GetPageheapUnmapped() uint64 { + if x != nil { + return x.PageheapUnmapped + } + return 0 +} + +func (x *Memory) GetPageheapFree() uint64 { + if x != nil { + return x.PageheapFree + } + return 0 +} + +func (x *Memory) GetTotalThreadCache() uint64 { + if x != nil { + return x.TotalThreadCache + } + return 0 +} + +func (x *Memory) GetTotalPhysicalBytes() uint64 { + if x != nil { + return x.TotalPhysicalBytes + } + return 0 +} + +var File_envoy_admin_v3_memory_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_memory_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x98, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, + 0x70, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, + 0x70, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x5f, 0x66, + 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x68, + 0x65, 0x61, 0x70, 0x46, 0x72, 0x65, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, + 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x42, 0x74, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x42, 
0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_memory_proto_rawDescOnce sync.Once + file_envoy_admin_v3_memory_proto_rawDescData = file_envoy_admin_v3_memory_proto_rawDesc +) + +func file_envoy_admin_v3_memory_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_memory_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_memory_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_memory_proto_rawDescData) + }) + return file_envoy_admin_v3_memory_proto_rawDescData +} + +var file_envoy_admin_v3_memory_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_memory_proto_goTypes = []interface{}{ + (*Memory)(nil), // 0: envoy.admin.v3.Memory +} +var file_envoy_admin_v3_memory_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_memory_proto_init() } +func file_envoy_admin_v3_memory_proto_init() { + if File_envoy_admin_v3_memory_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_memory_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Memory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_memory_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_memory_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_memory_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_memory_proto_msgTypes, + }.Build() + File_envoy_admin_v3_memory_proto = out.File + file_envoy_admin_v3_memory_proto_rawDesc = nil + file_envoy_admin_v3_memory_proto_goTypes = nil + file_envoy_admin_v3_memory_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go new file mode 100644 index 000000000..bcb9c1d20 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go @@ -0,0 +1,147 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Memory with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Memory) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Memory with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in MemoryMultiError, or nil if none found. +func (m *Memory) ValidateAll() error { + return m.validate(true) +} + +func (m *Memory) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Allocated + + // no validation rules for HeapSize + + // no validation rules for PageheapUnmapped + + // no validation rules for PageheapFree + + // no validation rules for TotalThreadCache + + // no validation rules for TotalPhysicalBytes + + if len(errors) > 0 { + return MemoryMultiError(errors) + } + + return nil +} + +// MemoryMultiError is an error wrapping multiple validation errors returned by +// Memory.ValidateAll() if the designated constraints aren't met. +type MemoryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MemoryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MemoryMultiError) AllErrors() []error { return m } + +// MemoryValidationError is the validation error returned by Memory.Validate if +// the designated constraints aren't met. +type MemoryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MemoryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MemoryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MemoryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MemoryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MemoryValidationError) ErrorName() string { return "MemoryValidationError" } + +// Error satisfies the builtin error interface +func (e MemoryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMemory.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MemoryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MemoryValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go new file mode 100644 index 000000000..6e3a23688 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Memory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Memory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Memory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalPhysicalBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalPhysicalBytes)) + i-- + dAtA[i] = 0x30 + } + if m.TotalThreadCache != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalThreadCache)) + i-- + dAtA[i] = 0x28 + } + if m.PageheapFree != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapFree)) + i-- + dAtA[i] = 0x20 + } + if m.PageheapUnmapped != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapUnmapped)) + i-- + dAtA[i] = 0x18 + } + if m.HeapSize != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeapSize)) + i-- + dAtA[i] = 0x10 + } + if m.Allocated != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Allocated)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Memory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocated != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Allocated)) + } + if m.HeapSize != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeapSize)) + } + if m.PageheapUnmapped != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapUnmapped)) + } + if m.PageheapFree != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapFree)) + } + if m.TotalThreadCache != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalThreadCache)) + } + if m.TotalPhysicalBytes != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.TotalPhysicalBytes)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go new file mode 100644 index 000000000..3b7189596 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SimpleMetric_Type int32 + +const ( + SimpleMetric_COUNTER SimpleMetric_Type = 0 + SimpleMetric_GAUGE SimpleMetric_Type = 1 +) + +// Enum value maps for SimpleMetric_Type. +var ( + SimpleMetric_Type_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + } + SimpleMetric_Type_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + } +) + +func (x SimpleMetric_Type) Enum() *SimpleMetric_Type { + p := new(SimpleMetric_Type) + *p = x + return p +} + +func (x SimpleMetric_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SimpleMetric_Type) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_metrics_proto_enumTypes[0].Descriptor() +} + +func (SimpleMetric_Type) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_metrics_proto_enumTypes[0] +} + +func (x SimpleMetric_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SimpleMetric_Type.Descriptor instead. +func (SimpleMetric_Type) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0, 0} +} + +// Proto representation of an Envoy Counter or Gauge value. +type SimpleMetric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the metric represented. + Type SimpleMetric_Type `protobuf:"varint,1,opt,name=type,proto3,enum=envoy.admin.v3.SimpleMetric_Type" json:"type,omitempty"` + // Current metric value. + Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + // Name of the metric. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *SimpleMetric) Reset() { + *x = SimpleMetric{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SimpleMetric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SimpleMetric) ProtoMessage() {} + +func (x *SimpleMetric) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SimpleMetric.ProtoReflect.Descriptor instead. +func (*SimpleMetric) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *SimpleMetric) GetType() SimpleMetric_Type { + if x != nil { + return x.Type + } + return SimpleMetric_COUNTER +} + +func (x *SimpleMetric) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *SimpleMetric) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_envoy_admin_v3_metrics_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_metrics_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x1e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, + 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x42, 0x75, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 
0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_metrics_proto_rawDescOnce sync.Once + file_envoy_admin_v3_metrics_proto_rawDescData = file_envoy_admin_v3_metrics_proto_rawDesc +) + +func file_envoy_admin_v3_metrics_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_metrics_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_metrics_proto_rawDescData) + }) + return file_envoy_admin_v3_metrics_proto_rawDescData +} + +var file_envoy_admin_v3_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_admin_v3_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_metrics_proto_goTypes = []interface{}{ + (SimpleMetric_Type)(0), // 0: envoy.admin.v3.SimpleMetric.Type + (*SimpleMetric)(nil), // 1: envoy.admin.v3.SimpleMetric +} +var file_envoy_admin_v3_metrics_proto_depIdxs = []int32{ + 0, // 0: envoy.admin.v3.SimpleMetric.type:type_name -> envoy.admin.v3.SimpleMetric.Type + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_metrics_proto_init() } +func file_envoy_admin_v3_metrics_proto_init() { + if File_envoy_admin_v3_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SimpleMetric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_metrics_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_metrics_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_metrics_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_metrics_proto_msgTypes, + }.Build() + File_envoy_admin_v3_metrics_proto = out.File + file_envoy_admin_v3_metrics_proto_rawDesc = nil + file_envoy_admin_v3_metrics_proto_goTypes = nil + file_envoy_admin_v3_metrics_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go new file mode 100644 index 000000000..903d70e19 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go @@ -0,0 +1,142 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SimpleMetric with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *SimpleMetric) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SimpleMetric with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SimpleMetricMultiError, or +// nil if none found. +func (m *SimpleMetric) ValidateAll() error { + return m.validate(true) +} + +func (m *SimpleMetric) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Type + + // no validation rules for Value + + // no validation rules for Name + + if len(errors) > 0 { + return SimpleMetricMultiError(errors) + } + + return nil +} + +// SimpleMetricMultiError is an error wrapping multiple validation errors +// returned by SimpleMetric.ValidateAll() if the designated constraints aren't met. +type SimpleMetricMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SimpleMetricMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SimpleMetricMultiError) AllErrors() []error { return m } + +// SimpleMetricValidationError is the validation error returned by +// SimpleMetric.Validate if the designated constraints aren't met. +type SimpleMetricValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SimpleMetricValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SimpleMetricValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SimpleMetricValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SimpleMetricValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SimpleMetricValidationError) ErrorName() string { return "SimpleMetricValidationError" } + +// Error satisfies the builtin error interface +func (e SimpleMetricValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSimpleMetric.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SimpleMetricValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SimpleMetricValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go new file mode 100644 index 000000000..0c09ae045 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go @@ -0,0 +1,89 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SimpleMetric) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SimpleMetric) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SimpleMetric) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SimpleMetric) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go new file mode 100644 index 000000000..44f35183d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run +// under :option:`--enable-mutex-tracing`. For more information, see the “absl::Mutex“ +// [docs](https://abseil.io/about/design/mutex#extra-features). +// +// *NB*: The wait cycles below are measured by “absl::base_internal::CycleClock“, and may not +// correspond to core clock frequency. For more information, see the “CycleClock“ +// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). +type MutexStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of individual mutex contentions which have occurred since startup. + NumContentions uint64 `protobuf:"varint,1,opt,name=num_contentions,json=numContentions,proto3" json:"num_contentions,omitempty"` + // The length of the current contention wait cycle. + CurrentWaitCycles uint64 `protobuf:"varint,2,opt,name=current_wait_cycles,json=currentWaitCycles,proto3" json:"current_wait_cycles,omitempty"` + // The lifetime total of all contention wait cycles. + LifetimeWaitCycles uint64 `protobuf:"varint,3,opt,name=lifetime_wait_cycles,json=lifetimeWaitCycles,proto3" json:"lifetime_wait_cycles,omitempty"` +} + +func (x *MutexStats) Reset() { + *x = MutexStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MutexStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MutexStats) ProtoMessage() {} + +func (x *MutexStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MutexStats.ProtoReflect.Descriptor instead. 
+func (*MutexStats) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *MutexStats) GetNumContentions() uint64 { + if x != nil { + return x.NumContentions + } + return 0 +} + +func (x *MutexStats) GetCurrentWaitCycles() uint64 { + if x != nil { + return x.CurrentWaitCycles + } + return 0 +} + +func (x *MutexStats) GetLifetimeWaitCycles() uint64 { + if x != nil { + return x.LifetimeWaitCycles + } + return 0 +} + +var File_envoy_admin_v3_mutex_stats_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_mutex_stats_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6e, 0x75, + 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x6c, 0x69, 0x66, 0x65, + 0x74, 0x69, 0x6d, 0x65, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x75, 0x74, 0x65, 0x78, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4d, + 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_mutex_stats_proto_rawDescOnce sync.Once + file_envoy_admin_v3_mutex_stats_proto_rawDescData = file_envoy_admin_v3_mutex_stats_proto_rawDesc +) + +func file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP() []byte { + 
file_envoy_admin_v3_mutex_stats_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_mutex_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_mutex_stats_proto_rawDescData) + }) + return file_envoy_admin_v3_mutex_stats_proto_rawDescData +} + +var file_envoy_admin_v3_mutex_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_mutex_stats_proto_goTypes = []interface{}{ + (*MutexStats)(nil), // 0: envoy.admin.v3.MutexStats +} +var file_envoy_admin_v3_mutex_stats_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_mutex_stats_proto_init() } +func file_envoy_admin_v3_mutex_stats_proto_init() { + if File_envoy_admin_v3_mutex_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_mutex_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MutexStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_mutex_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_mutex_stats_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_mutex_stats_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_mutex_stats_proto_msgTypes, + }.Build() + File_envoy_admin_v3_mutex_stats_proto = out.File + file_envoy_admin_v3_mutex_stats_proto_rawDesc = nil + file_envoy_admin_v3_mutex_stats_proto_goTypes = nil + file_envoy_admin_v3_mutex_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go new file mode 100644 index 000000000..236524c54 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go @@ -0,0 +1,142 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MutexStats with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MutexStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MutexStats with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MutexStatsMultiError, or +// nil if none found. 
+func (m *MutexStats) ValidateAll() error { + return m.validate(true) +} + +func (m *MutexStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumContentions + + // no validation rules for CurrentWaitCycles + + // no validation rules for LifetimeWaitCycles + + if len(errors) > 0 { + return MutexStatsMultiError(errors) + } + + return nil +} + +// MutexStatsMultiError is an error wrapping multiple validation errors +// returned by MutexStats.ValidateAll() if the designated constraints aren't met. +type MutexStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MutexStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MutexStatsMultiError) AllErrors() []error { return m } + +// MutexStatsValidationError is the validation error returned by +// MutexStats.Validate if the designated constraints aren't met. +type MutexStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MutexStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MutexStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MutexStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MutexStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MutexStatsValidationError) ErrorName() string { return "MutexStatsValidationError" } + +// Error satisfies the builtin error interface +func (e MutexStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMutexStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MutexStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MutexStatsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go new file mode 100644 index 000000000..4318cbc99 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MutexStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutexStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MutexStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LifetimeWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LifetimeWaitCycles)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CurrentWaitCycles)) + i-- + dAtA[i] = 0x10 + } + if m.NumContentions != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumContentions)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MutexStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumContentions != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumContentions)) + } + if m.CurrentWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CurrentWaitCycles)) + } + if m.LifetimeWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LifetimeWaitCycles)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go new file mode 100644 index 000000000..4538ff30f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go @@ -0,0 +1,975 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerInfo_State int32 + +const ( + // Server is live and serving traffic. + ServerInfo_LIVE ServerInfo_State = 0 + // Server is draining listeners in response to external health checks failing. + ServerInfo_DRAINING ServerInfo_State = 1 + // Server has not yet completed cluster manager initialization. + ServerInfo_PRE_INITIALIZING ServerInfo_State = 2 + // Server is running the cluster manager initialization callbacks (e.g., RDS). + ServerInfo_INITIALIZING ServerInfo_State = 3 +) + +// Enum value maps for ServerInfo_State. 
+var ( + ServerInfo_State_name = map[int32]string{ + 0: "LIVE", + 1: "DRAINING", + 2: "PRE_INITIALIZING", + 3: "INITIALIZING", + } + ServerInfo_State_value = map[string]int32{ + "LIVE": 0, + "DRAINING": 1, + "PRE_INITIALIZING": 2, + "INITIALIZING": 3, + } +) + +func (x ServerInfo_State) Enum() *ServerInfo_State { + p := new(ServerInfo_State) + *p = x + return p +} + +func (x ServerInfo_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServerInfo_State) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[0].Descriptor() +} + +func (ServerInfo_State) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[0] +} + +func (x ServerInfo_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServerInfo_State.Descriptor instead. +func (ServerInfo_State) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0, 0} +} + +type CommandLineOptions_IpVersion int32 + +const ( + CommandLineOptions_v4 CommandLineOptions_IpVersion = 0 + CommandLineOptions_v6 CommandLineOptions_IpVersion = 1 +) + +// Enum value maps for CommandLineOptions_IpVersion. +var ( + CommandLineOptions_IpVersion_name = map[int32]string{ + 0: "v4", + 1: "v6", + } + CommandLineOptions_IpVersion_value = map[string]int32{ + "v4": 0, + "v6": 1, + } +) + +func (x CommandLineOptions_IpVersion) Enum() *CommandLineOptions_IpVersion { + p := new(CommandLineOptions_IpVersion) + *p = x + return p +} + +func (x CommandLineOptions_IpVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_IpVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[1].Descriptor() +} + +func (CommandLineOptions_IpVersion) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[1] +} + +func (x CommandLineOptions_IpVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_IpVersion.Descriptor instead. +func (CommandLineOptions_IpVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 0} +} + +type CommandLineOptions_Mode int32 + +const ( + // Validate configs and then serve traffic normally. + CommandLineOptions_Serve CommandLineOptions_Mode = 0 + // Validate configs and exit. + CommandLineOptions_Validate CommandLineOptions_Mode = 1 + // Completely load and initialize the config, and then exit without running the listener loop. + CommandLineOptions_InitOnly CommandLineOptions_Mode = 2 +) + +// Enum value maps for CommandLineOptions_Mode. 
+var ( + CommandLineOptions_Mode_name = map[int32]string{ + 0: "Serve", + 1: "Validate", + 2: "InitOnly", + } + CommandLineOptions_Mode_value = map[string]int32{ + "Serve": 0, + "Validate": 1, + "InitOnly": 2, + } +) + +func (x CommandLineOptions_Mode) Enum() *CommandLineOptions_Mode { + p := new(CommandLineOptions_Mode) + *p = x + return p +} + +func (x CommandLineOptions_Mode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_Mode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[2].Descriptor() +} + +func (CommandLineOptions_Mode) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[2] +} + +func (x CommandLineOptions_Mode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_Mode.Descriptor instead. +func (CommandLineOptions_Mode) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 1} +} + +type CommandLineOptions_DrainStrategy int32 + +const ( + // Gradually discourage connections over the course of the drain period. + CommandLineOptions_Gradual CommandLineOptions_DrainStrategy = 0 + // Discourage all connections for the duration of the drain sequence. + CommandLineOptions_Immediate CommandLineOptions_DrainStrategy = 1 +) + +// Enum value maps for CommandLineOptions_DrainStrategy. +var ( + CommandLineOptions_DrainStrategy_name = map[int32]string{ + 0: "Gradual", + 1: "Immediate", + } + CommandLineOptions_DrainStrategy_value = map[string]int32{ + "Gradual": 0, + "Immediate": 1, + } +) + +func (x CommandLineOptions_DrainStrategy) Enum() *CommandLineOptions_DrainStrategy { + p := new(CommandLineOptions_DrainStrategy) + *p = x + return p +} + +func (x CommandLineOptions_DrainStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_DrainStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[3].Descriptor() +} + +func (CommandLineOptions_DrainStrategy) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[3] +} + +func (x CommandLineOptions_DrainStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_DrainStrategy.Descriptor instead. +func (CommandLineOptions_DrainStrategy) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 2} +} + +// Proto representation of the value returned by /server_info, containing +// server version/server status information. +// [#next-free-field: 8] +type ServerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Server version. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // State of the server. + State ServerInfo_State `protobuf:"varint,2,opt,name=state,proto3,enum=envoy.admin.v3.ServerInfo_State" json:"state,omitempty"` + // Uptime since current epoch was started. + UptimeCurrentEpoch *durationpb.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"` + // Uptime since the start of the first epoch. 
+ UptimeAllEpochs *durationpb.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"` + // Hot restart version. + HotRestartVersion string `protobuf:"bytes,5,opt,name=hot_restart_version,json=hotRestartVersion,proto3" json:"hot_restart_version,omitempty"` + // Command line options the server is currently running with. + CommandLineOptions *CommandLineOptions `protobuf:"bytes,6,opt,name=command_line_options,json=commandLineOptions,proto3" json:"command_line_options,omitempty"` + // Populated node identity of this server. + Node *v3.Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"` +} + +func (x *ServerInfo) Reset() { + *x = ServerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerInfo) ProtoMessage() {} + +func (x *ServerInfo) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead. +func (*ServerInfo) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ServerInfo) GetState() ServerInfo_State { + if x != nil { + return x.State + } + return ServerInfo_LIVE +} + +func (x *ServerInfo) GetUptimeCurrentEpoch() *durationpb.Duration { + if x != nil { + return x.UptimeCurrentEpoch + } + return nil +} + +func (x *ServerInfo) GetUptimeAllEpochs() *durationpb.Duration { + if x != nil { + return x.UptimeAllEpochs + } + return nil +} + +func (x *ServerInfo) GetHotRestartVersion() string { + if x != nil { + return x.HotRestartVersion + } + return "" +} + +func (x *ServerInfo) GetCommandLineOptions() *CommandLineOptions { + if x != nil { + return x.CommandLineOptions + } + return nil +} + +func (x *ServerInfo) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +// [#next-free-field: 41] +type CommandLineOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See :option:`--base-id` for details. + BaseId uint64 `protobuf:"varint,1,opt,name=base_id,json=baseId,proto3" json:"base_id,omitempty"` + // See :option:`--use-dynamic-base-id` for details. + UseDynamicBaseId bool `protobuf:"varint,31,opt,name=use_dynamic_base_id,json=useDynamicBaseId,proto3" json:"use_dynamic_base_id,omitempty"` + // See :option:`--skip-hot-restart-on-no-parent` for details. + SkipHotRestartOnNoParent bool `protobuf:"varint,39,opt,name=skip_hot_restart_on_no_parent,json=skipHotRestartOnNoParent,proto3" json:"skip_hot_restart_on_no_parent,omitempty"` + // See :option:`--skip-hot-restart-parent-stats` for details. + SkipHotRestartParentStats bool `protobuf:"varint,40,opt,name=skip_hot_restart_parent_stats,json=skipHotRestartParentStats,proto3" json:"skip_hot_restart_parent_stats,omitempty"` + // See :option:`--base-id-path` for details. 
+ BaseIdPath string `protobuf:"bytes,32,opt,name=base_id_path,json=baseIdPath,proto3" json:"base_id_path,omitempty"` + // See :option:`--concurrency` for details. + Concurrency uint32 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // See :option:`--config-path` for details. + ConfigPath string `protobuf:"bytes,3,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` + // See :option:`--config-yaml` for details. + ConfigYaml string `protobuf:"bytes,4,opt,name=config_yaml,json=configYaml,proto3" json:"config_yaml,omitempty"` + // See :option:`--allow-unknown-static-fields` for details. + AllowUnknownStaticFields bool `protobuf:"varint,5,opt,name=allow_unknown_static_fields,json=allowUnknownStaticFields,proto3" json:"allow_unknown_static_fields,omitempty"` + // See :option:`--reject-unknown-dynamic-fields` for details. + RejectUnknownDynamicFields bool `protobuf:"varint,26,opt,name=reject_unknown_dynamic_fields,json=rejectUnknownDynamicFields,proto3" json:"reject_unknown_dynamic_fields,omitempty"` + // See :option:`--ignore-unknown-dynamic-fields` for details. + IgnoreUnknownDynamicFields bool `protobuf:"varint,30,opt,name=ignore_unknown_dynamic_fields,json=ignoreUnknownDynamicFields,proto3" json:"ignore_unknown_dynamic_fields,omitempty"` + // See :option:`--admin-address-path` for details. + AdminAddressPath string `protobuf:"bytes,6,opt,name=admin_address_path,json=adminAddressPath,proto3" json:"admin_address_path,omitempty"` + // See :option:`--local-address-ip-version` for details. + LocalAddressIpVersion CommandLineOptions_IpVersion `protobuf:"varint,7,opt,name=local_address_ip_version,json=localAddressIpVersion,proto3,enum=envoy.admin.v3.CommandLineOptions_IpVersion" json:"local_address_ip_version,omitempty"` + // See :option:`--log-level` for details. + LogLevel string `protobuf:"bytes,8,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` + // See :option:`--component-log-level` for details. + ComponentLogLevel string `protobuf:"bytes,9,opt,name=component_log_level,json=componentLogLevel,proto3" json:"component_log_level,omitempty"` + // See :option:`--log-format` for details. + LogFormat string `protobuf:"bytes,10,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"` + // See :option:`--log-format-escaped` for details. + LogFormatEscaped bool `protobuf:"varint,27,opt,name=log_format_escaped,json=logFormatEscaped,proto3" json:"log_format_escaped,omitempty"` + // See :option:`--log-path` for details. + LogPath string `protobuf:"bytes,11,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + // See :option:`--service-cluster` for details. + ServiceCluster string `protobuf:"bytes,13,opt,name=service_cluster,json=serviceCluster,proto3" json:"service_cluster,omitempty"` + // See :option:`--service-node` for details. + ServiceNode string `protobuf:"bytes,14,opt,name=service_node,json=serviceNode,proto3" json:"service_node,omitempty"` + // See :option:`--service-zone` for details. + ServiceZone string `protobuf:"bytes,15,opt,name=service_zone,json=serviceZone,proto3" json:"service_zone,omitempty"` + // See :option:`--file-flush-interval-msec` for details. + FileFlushInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"` + // See :option:`--drain-time-s` for details. 
+ DrainTime *durationpb.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"` + // See :option:`--drain-strategy` for details. + DrainStrategy CommandLineOptions_DrainStrategy `protobuf:"varint,33,opt,name=drain_strategy,json=drainStrategy,proto3,enum=envoy.admin.v3.CommandLineOptions_DrainStrategy" json:"drain_strategy,omitempty"` + // See :option:`--parent-shutdown-time-s` for details. + ParentShutdownTime *durationpb.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"` + // See :option:`--mode` for details. + Mode CommandLineOptions_Mode `protobuf:"varint,19,opt,name=mode,proto3,enum=envoy.admin.v3.CommandLineOptions_Mode" json:"mode,omitempty"` + // See :option:`--disable-hot-restart` for details. + DisableHotRestart bool `protobuf:"varint,22,opt,name=disable_hot_restart,json=disableHotRestart,proto3" json:"disable_hot_restart,omitempty"` + // See :option:`--enable-mutex-tracing` for details. + EnableMutexTracing bool `protobuf:"varint,23,opt,name=enable_mutex_tracing,json=enableMutexTracing,proto3" json:"enable_mutex_tracing,omitempty"` + // See :option:`--restart-epoch` for details. + RestartEpoch uint32 `protobuf:"varint,24,opt,name=restart_epoch,json=restartEpoch,proto3" json:"restart_epoch,omitempty"` + // See :option:`--cpuset-threads` for details. + CpusetThreads bool `protobuf:"varint,25,opt,name=cpuset_threads,json=cpusetThreads,proto3" json:"cpuset_threads,omitempty"` + // See :option:`--disable-extensions` for details. + DisabledExtensions []string `protobuf:"bytes,28,rep,name=disabled_extensions,json=disabledExtensions,proto3" json:"disabled_extensions,omitempty"` + // See :option:`--enable-fine-grain-logging` for details. + EnableFineGrainLogging bool `protobuf:"varint,34,opt,name=enable_fine_grain_logging,json=enableFineGrainLogging,proto3" json:"enable_fine_grain_logging,omitempty"` + // See :option:`--socket-path` for details. + SocketPath string `protobuf:"bytes,35,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"` + // See :option:`--socket-mode` for details. + SocketMode uint32 `protobuf:"varint,36,opt,name=socket_mode,json=socketMode,proto3" json:"socket_mode,omitempty"` + // See :option:`--enable-core-dump` for details. + EnableCoreDump bool `protobuf:"varint,37,opt,name=enable_core_dump,json=enableCoreDump,proto3" json:"enable_core_dump,omitempty"` + // See :option:`--stats-tag` for details. + StatsTag []string `protobuf:"bytes,38,rep,name=stats_tag,json=statsTag,proto3" json:"stats_tag,omitempty"` +} + +func (x *CommandLineOptions) Reset() { + *x = CommandLineOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommandLineOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommandLineOptions) ProtoMessage() {} + +func (x *CommandLineOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommandLineOptions.ProtoReflect.Descriptor instead. 
+func (*CommandLineOptions) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1} +} + +func (x *CommandLineOptions) GetBaseId() uint64 { + if x != nil { + return x.BaseId + } + return 0 +} + +func (x *CommandLineOptions) GetUseDynamicBaseId() bool { + if x != nil { + return x.UseDynamicBaseId + } + return false +} + +func (x *CommandLineOptions) GetSkipHotRestartOnNoParent() bool { + if x != nil { + return x.SkipHotRestartOnNoParent + } + return false +} + +func (x *CommandLineOptions) GetSkipHotRestartParentStats() bool { + if x != nil { + return x.SkipHotRestartParentStats + } + return false +} + +func (x *CommandLineOptions) GetBaseIdPath() string { + if x != nil { + return x.BaseIdPath + } + return "" +} + +func (x *CommandLineOptions) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *CommandLineOptions) GetConfigPath() string { + if x != nil { + return x.ConfigPath + } + return "" +} + +func (x *CommandLineOptions) GetConfigYaml() string { + if x != nil { + return x.ConfigYaml + } + return "" +} + +func (x *CommandLineOptions) GetAllowUnknownStaticFields() bool { + if x != nil { + return x.AllowUnknownStaticFields + } + return false +} + +func (x *CommandLineOptions) GetRejectUnknownDynamicFields() bool { + if x != nil { + return x.RejectUnknownDynamicFields + } + return false +} + +func (x *CommandLineOptions) GetIgnoreUnknownDynamicFields() bool { + if x != nil { + return x.IgnoreUnknownDynamicFields + } + return false +} + +func (x *CommandLineOptions) GetAdminAddressPath() string { + if x != nil { + return x.AdminAddressPath + } + return "" +} + +func (x *CommandLineOptions) GetLocalAddressIpVersion() CommandLineOptions_IpVersion { + if x != nil { + return x.LocalAddressIpVersion + } + return CommandLineOptions_v4 +} + +func (x *CommandLineOptions) GetLogLevel() string { + if x != nil { + return x.LogLevel + } + return "" +} + +func (x *CommandLineOptions) GetComponentLogLevel() string { + if x != nil { + return x.ComponentLogLevel + } + return "" +} + +func (x *CommandLineOptions) GetLogFormat() string { + if x != nil { + return x.LogFormat + } + return "" +} + +func (x *CommandLineOptions) GetLogFormatEscaped() bool { + if x != nil { + return x.LogFormatEscaped + } + return false +} + +func (x *CommandLineOptions) GetLogPath() string { + if x != nil { + return x.LogPath + } + return "" +} + +func (x *CommandLineOptions) GetServiceCluster() string { + if x != nil { + return x.ServiceCluster + } + return "" +} + +func (x *CommandLineOptions) GetServiceNode() string { + if x != nil { + return x.ServiceNode + } + return "" +} + +func (x *CommandLineOptions) GetServiceZone() string { + if x != nil { + return x.ServiceZone + } + return "" +} + +func (x *CommandLineOptions) GetFileFlushInterval() *durationpb.Duration { + if x != nil { + return x.FileFlushInterval + } + return nil +} + +func (x *CommandLineOptions) GetDrainTime() *durationpb.Duration { + if x != nil { + return x.DrainTime + } + return nil +} + +func (x *CommandLineOptions) GetDrainStrategy() CommandLineOptions_DrainStrategy { + if x != nil { + return x.DrainStrategy + } + return CommandLineOptions_Gradual +} + +func (x *CommandLineOptions) GetParentShutdownTime() *durationpb.Duration { + if x != nil { + return x.ParentShutdownTime + } + return nil +} + +func (x *CommandLineOptions) GetMode() CommandLineOptions_Mode { + if x != nil { + return x.Mode + } + return CommandLineOptions_Serve +} + +func (x *CommandLineOptions) 
GetDisableHotRestart() bool { + if x != nil { + return x.DisableHotRestart + } + return false +} + +func (x *CommandLineOptions) GetEnableMutexTracing() bool { + if x != nil { + return x.EnableMutexTracing + } + return false +} + +func (x *CommandLineOptions) GetRestartEpoch() uint32 { + if x != nil { + return x.RestartEpoch + } + return 0 +} + +func (x *CommandLineOptions) GetCpusetThreads() bool { + if x != nil { + return x.CpusetThreads + } + return false +} + +func (x *CommandLineOptions) GetDisabledExtensions() []string { + if x != nil { + return x.DisabledExtensions + } + return nil +} + +func (x *CommandLineOptions) GetEnableFineGrainLogging() bool { + if x != nil { + return x.EnableFineGrainLogging + } + return false +} + +func (x *CommandLineOptions) GetSocketPath() string { + if x != nil { + return x.SocketPath + } + return "" +} + +func (x *CommandLineOptions) GetSocketMode() uint32 { + if x != nil { + return x.SocketMode + } + return 0 +} + +func (x *CommandLineOptions) GetEnableCoreDump() bool { + if x != nil { + return x.EnableCoreDump + } + return false +} + +func (x *CommandLineOptions) GetStatsTag() []string { + if x != nil { + return x.StatsTag + } + return nil +} + +var File_envoy_admin_v3_server_info_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_server_info_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, 0x04, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x75, 0x70, 0x74, 
0x69, 0x6d, 0x65, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x41, 0x6c, 0x6c, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x68, 0x6f, + 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x14, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 0x22, 0x47, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x56, + 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, + 0x01, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x52, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, + 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, + 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x22, 0xde, 0x0f, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x62, 0x61, 0x73, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x12, 0x2d, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, + 0x73, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, + 0x3f, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x6e, 0x4e, 0x6f, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x40, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 
0x74, 0x61, + 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a, 0x18, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, + 0x01, 0x28, 
0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f, 0x67, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x7a, + 0x6f, 0x6e, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x66, 0x69, 0x6c, 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x64, + 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2e, + 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 
0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, + 0x70, 0x75, 0x73, 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, + 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 0x67, 0x72, 0x61, + 0x69, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72, 0x61, 0x69, + 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x25, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x72, 0x65, + 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, + 0x67, 0x18, 0x26, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, + 0x67, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, + 0x0a, 0x02, 0x76, 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01, 0x22, 0x2d, + 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76, 0x65, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22, 0x2b, 0x0a, + 0x0d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0b, + 0x0a, 0x07, 0x47, 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, + 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, + 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, + 0x04, 0x08, 0x14, 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 
0x16, 0x4a, 0x04, 0x08, 0x1d, 0x10, + 0x1e, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x6f, 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x52, 0x11, + 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_server_info_proto_rawDescOnce sync.Once + file_envoy_admin_v3_server_info_proto_rawDescData = file_envoy_admin_v3_server_info_proto_rawDesc +) + +func file_envoy_admin_v3_server_info_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_server_info_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_server_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_server_info_proto_rawDescData) + }) + return file_envoy_admin_v3_server_info_proto_rawDescData +} + +var file_envoy_admin_v3_server_info_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_envoy_admin_v3_server_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_server_info_proto_goTypes = []interface{}{ + (ServerInfo_State)(0), // 0: envoy.admin.v3.ServerInfo.State + (CommandLineOptions_IpVersion)(0), // 1: envoy.admin.v3.CommandLineOptions.IpVersion + (CommandLineOptions_Mode)(0), // 2: envoy.admin.v3.CommandLineOptions.Mode + (CommandLineOptions_DrainStrategy)(0), // 3: envoy.admin.v3.CommandLineOptions.DrainStrategy + (*ServerInfo)(nil), // 4: envoy.admin.v3.ServerInfo + (*CommandLineOptions)(nil), // 5: envoy.admin.v3.CommandLineOptions + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*v3.Node)(nil), // 7: envoy.config.core.v3.Node +} +var file_envoy_admin_v3_server_info_proto_depIdxs = []int32{ + 0, // 0: envoy.admin.v3.ServerInfo.state:type_name -> envoy.admin.v3.ServerInfo.State + 6, // 1: envoy.admin.v3.ServerInfo.uptime_current_epoch:type_name -> google.protobuf.Duration + 6, // 2: envoy.admin.v3.ServerInfo.uptime_all_epochs:type_name -> google.protobuf.Duration + 5, // 3: envoy.admin.v3.ServerInfo.command_line_options:type_name -> envoy.admin.v3.CommandLineOptions + 7, // 4: envoy.admin.v3.ServerInfo.node:type_name -> envoy.config.core.v3.Node + 1, // 5: envoy.admin.v3.CommandLineOptions.local_address_ip_version:type_name -> envoy.admin.v3.CommandLineOptions.IpVersion + 6, // 6: envoy.admin.v3.CommandLineOptions.file_flush_interval:type_name -> google.protobuf.Duration + 6, // 7: envoy.admin.v3.CommandLineOptions.drain_time:type_name -> google.protobuf.Duration + 3, // 8: envoy.admin.v3.CommandLineOptions.drain_strategy:type_name -> envoy.admin.v3.CommandLineOptions.DrainStrategy + 6, // 9: envoy.admin.v3.CommandLineOptions.parent_shutdown_time:type_name -> google.protobuf.Duration + 2, // 10: 
envoy.admin.v3.CommandLineOptions.mode:type_name -> envoy.admin.v3.CommandLineOptions.Mode + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_server_info_proto_init() } +func file_envoy_admin_v3_server_info_proto_init() { + if File_envoy_admin_v3_server_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_server_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_server_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommandLineOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_server_info_proto_rawDesc, + NumEnums: 4, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_server_info_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_server_info_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_server_info_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_server_info_proto_msgTypes, + }.Build() + File_envoy_admin_v3_server_info_proto = out.File + file_envoy_admin_v3_server_info_proto_rawDesc = nil + file_envoy_admin_v3_server_info_proto_goTypes = nil + file_envoy_admin_v3_server_info_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go new file mode 100644 index 000000000..8db097828 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go @@ -0,0 +1,509 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ServerInfo with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ServerInfo) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerInfo with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ServerInfoMultiError, or +// nil if none found. 
+func (m *ServerInfo) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerInfo) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Version + + // no validation rules for State + + if all { + switch v := interface{}(m.GetUptimeCurrentEpoch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUptimeCurrentEpoch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUptimeAllEpochs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUptimeAllEpochs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HotRestartVersion + + if all { + switch v := interface{}(m.GetCommandLineOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommandLineOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + 
return ServerInfoMultiError(errors) + } + + return nil +} + +// ServerInfoMultiError is an error wrapping multiple validation errors +// returned by ServerInfo.ValidateAll() if the designated constraints aren't met. +type ServerInfoMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerInfoMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ServerInfoMultiError) AllErrors() []error { return m } + +// ServerInfoValidationError is the validation error returned by +// ServerInfo.Validate if the designated constraints aren't met. +type ServerInfoValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerInfoValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerInfoValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ServerInfoValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerInfoValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerInfoValidationError) ErrorName() string { return "ServerInfoValidationError" } + +// Error satisfies the builtin error interface +func (e ServerInfoValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerInfo.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerInfoValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerInfoValidationError{} + +// Validate checks the field values on CommandLineOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CommandLineOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommandLineOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CommandLineOptionsMultiError, or nil if none found. 
+func (m *CommandLineOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *CommandLineOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BaseId + + // no validation rules for UseDynamicBaseId + + // no validation rules for SkipHotRestartOnNoParent + + // no validation rules for SkipHotRestartParentStats + + // no validation rules for BaseIdPath + + // no validation rules for Concurrency + + // no validation rules for ConfigPath + + // no validation rules for ConfigYaml + + // no validation rules for AllowUnknownStaticFields + + // no validation rules for RejectUnknownDynamicFields + + // no validation rules for IgnoreUnknownDynamicFields + + // no validation rules for AdminAddressPath + + // no validation rules for LocalAddressIpVersion + + // no validation rules for LogLevel + + // no validation rules for ComponentLogLevel + + // no validation rules for LogFormat + + // no validation rules for LogFormatEscaped + + // no validation rules for LogPath + + // no validation rules for ServiceCluster + + // no validation rules for ServiceNode + + // no validation rules for ServiceZone + + if all { + switch v := interface{}(m.GetFileFlushInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFileFlushInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDrainTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DrainStrategy + + if all { + switch v := interface{}(m.GetParentShutdownTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentShutdownTime()).(interface{ Validate() error }); ok { + if err 
:= v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Mode + + // no validation rules for DisableHotRestart + + // no validation rules for EnableMutexTracing + + // no validation rules for RestartEpoch + + // no validation rules for CpusetThreads + + // no validation rules for EnableFineGrainLogging + + // no validation rules for SocketPath + + // no validation rules for SocketMode + + // no validation rules for EnableCoreDump + + if len(errors) > 0 { + return CommandLineOptionsMultiError(errors) + } + + return nil +} + +// CommandLineOptionsMultiError is an error wrapping multiple validation errors +// returned by CommandLineOptions.ValidateAll() if the designated constraints +// aren't met. +type CommandLineOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommandLineOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommandLineOptionsMultiError) AllErrors() []error { return m } + +// CommandLineOptionsValidationError is the validation error returned by +// CommandLineOptions.Validate if the designated constraints aren't met. +type CommandLineOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommandLineOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommandLineOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommandLineOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommandLineOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommandLineOptionsValidationError) ErrorName() string { + return "CommandLineOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e CommandLineOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommandLineOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommandLineOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommandLineOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go new file mode 100644 index 000000000..5bf55561e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go @@ -0,0 +1,671 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ServerInfo) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerInfo) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ServerInfo) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CommandLineOptions != nil { + size, err := m.CommandLineOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HotRestartVersion) > 0 { + i -= len(m.HotRestartVersion) + copy(dAtA[i:], m.HotRestartVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HotRestartVersion))) + i-- + dAtA[i] = 0x2a + } + if m.UptimeAllEpochs != nil { + size, err := (*durationpb.Duration)(m.UptimeAllEpochs).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.UptimeCurrentEpoch != nil { + size, err := (*durationpb.Duration)(m.UptimeCurrentEpoch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommandLineOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandLineOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommandLineOptions) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipHotRestartParentStats { + i-- + if m.SkipHotRestartParentStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if m.SkipHotRestartOnNoParent { + i-- + if m.SkipHotRestartOnNoParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if len(m.StatsTag) > 0 { + for iNdEx := len(m.StatsTag) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StatsTag[iNdEx]) + copy(dAtA[i:], m.StatsTag[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatsTag[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.EnableCoreDump { + i-- + if m.EnableCoreDump { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.SocketMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SocketMode)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa0 + } + if len(m.SocketPath) > 0 { + i -= len(m.SocketPath) + copy(dAtA[i:], m.SocketPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SocketPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.EnableFineGrainLogging { + i-- + if m.EnableFineGrainLogging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.DrainStrategy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainStrategy)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if len(m.BaseIdPath) > 0 { + i -= len(m.BaseIdPath) + copy(dAtA[i:], m.BaseIdPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BaseIdPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.UseDynamicBaseId { + i-- + if m.UseDynamicBaseId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.IgnoreUnknownDynamicFields { + i-- + if m.IgnoreUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if len(m.DisabledExtensions) > 0 { + for iNdEx := len(m.DisabledExtensions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DisabledExtensions[iNdEx]) + copy(dAtA[i:], m.DisabledExtensions[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisabledExtensions[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.LogFormatEscaped { + i-- + if m.LogFormatEscaped { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.RejectUnknownDynamicFields { + i-- + if m.RejectUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.CpusetThreads { + i-- + if m.CpusetThreads { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.RestartEpoch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RestartEpoch)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.EnableMutexTracing { + i-- + if m.EnableMutexTracing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.DisableHotRestart { + i-- + if m.DisableHotRestart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.ParentShutdownTime != nil { 
+ size, err := (*durationpb.Duration)(m.ParentShutdownTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.DrainTime != nil { + size, err := (*durationpb.Duration)(m.DrainTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FileFlushInterval != nil { + size, err := (*durationpb.Duration)(m.FileFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.ServiceZone) > 0 { + i -= len(m.ServiceZone) + copy(dAtA[i:], m.ServiceZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceZone))) + i-- + dAtA[i] = 0x7a + } + if len(m.ServiceNode) > 0 { + i -= len(m.ServiceNode) + copy(dAtA[i:], m.ServiceNode) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceNode))) + i-- + dAtA[i] = 0x72 + } + if len(m.ServiceCluster) > 0 { + i -= len(m.ServiceCluster) + copy(dAtA[i:], m.ServiceCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceCluster))) + i-- + dAtA[i] = 0x6a + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x5a + } + if len(m.LogFormat) > 0 { + i -= len(m.LogFormat) + copy(dAtA[i:], m.LogFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogFormat))) + i-- + dAtA[i] = 0x52 + } + if len(m.ComponentLogLevel) > 0 { + i -= len(m.ComponentLogLevel) + copy(dAtA[i:], m.ComponentLogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ComponentLogLevel))) + i-- + dAtA[i] = 0x4a + } + if len(m.LogLevel) > 0 { + i -= len(m.LogLevel) + copy(dAtA[i:], m.LogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogLevel))) + i-- + dAtA[i] = 0x42 + } + if m.LocalAddressIpVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LocalAddressIpVersion)) + i-- + dAtA[i] = 0x38 + } + if len(m.AdminAddressPath) > 0 { + i -= len(m.AdminAddressPath) + copy(dAtA[i:], m.AdminAddressPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AdminAddressPath))) + i-- + dAtA[i] = 0x32 + } + if m.AllowUnknownStaticFields { + i-- + if m.AllowUnknownStaticFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ConfigYaml) > 0 { + i -= len(m.ConfigYaml) + copy(dAtA[i:], m.ConfigYaml) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigYaml))) + i-- + dAtA[i] = 0x22 + } + if len(m.ConfigPath) > 0 { + i -= len(m.ConfigPath) + copy(dAtA[i:], m.ConfigPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigPath))) + i-- + dAtA[i] = 0x1a + } + if m.Concurrency != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x10 + } + if m.BaseId != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BaseId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ServerInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.UptimeCurrentEpoch != nil { + l = (*durationpb.Duration)(m.UptimeCurrentEpoch).SizeVT() + 
n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UptimeAllEpochs != nil { + l = (*durationpb.Duration)(m.UptimeAllEpochs).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HotRestartVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommandLineOptions != nil { + l = m.CommandLineOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommandLineOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseId != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BaseId)) + } + if m.Concurrency != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Concurrency)) + } + l = len(m.ConfigPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConfigYaml) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowUnknownStaticFields { + n += 2 + } + l = len(m.AdminAddressPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddressIpVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LocalAddressIpVersion)) + } + l = len(m.LogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ComponentLogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogFormat) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceNode) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FileFlushInterval != nil { + l = (*durationpb.Duration)(m.FileFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTime != nil { + l = (*durationpb.Duration)(m.DrainTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ParentShutdownTime != nil { + l = (*durationpb.Duration)(m.ParentShutdownTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + if m.DisableHotRestart { + n += 3 + } + if m.EnableMutexTracing { + n += 3 + } + if m.RestartEpoch != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.RestartEpoch)) + } + if m.CpusetThreads { + n += 3 + } + if m.RejectUnknownDynamicFields { + n += 3 + } + if m.LogFormatEscaped { + n += 3 + } + if len(m.DisabledExtensions) > 0 { + for _, s := range m.DisabledExtensions { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreUnknownDynamicFields { + n += 3 + } + if m.UseDynamicBaseId { + n += 3 + } + l = len(m.BaseIdPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainStrategy != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DrainStrategy)) + } + if m.EnableFineGrainLogging { + n += 3 + } + l = len(m.SocketPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketMode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.SocketMode)) + } + if m.EnableCoreDump { + n += 3 + } + if 
len(m.StatsTag) > 0 { + for _, s := range m.StatsTag { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SkipHotRestartOnNoParent { + n += 3 + } + if m.SkipHotRestartParentStats { + n += 3 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go new file mode 100644 index 000000000..f30e4500e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The /tap admin request body that is used to configure an active tap session. +type TapRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The opaque configuration ID used to match the configuration to a loaded extension. + // A tap extension configures a similar opaque ID that is used to match. + ConfigId string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // The tap configuration to load. + TapConfig *v3.TapConfig `protobuf:"bytes,2,opt,name=tap_config,json=tapConfig,proto3" json:"tap_config,omitempty"` +} + +func (x *TapRequest) Reset() { + *x = TapRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_tap_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TapRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TapRequest) ProtoMessage() {} + +func (x *TapRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_tap_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TapRequest.ProtoReflect.Descriptor instead. 
+func (*TapRequest) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_tap_proto_rawDescGZIP(), []int{0} +} + +func (x *TapRequest) GetConfigId() string { + if x != nil { + return x.ConfigId + } + return "" +} + +func (x *TapRequest) GetTapConfig() *v3.TapConfig { + if x != nil { + return x.TapConfig + } + return nil +} + +var File_envoy_admin_v3_tap_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_tap_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x74, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x0a, + 0x74, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x70, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x61, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x71, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_tap_proto_rawDescOnce sync.Once + file_envoy_admin_v3_tap_proto_rawDescData = file_envoy_admin_v3_tap_proto_rawDesc +) + +func file_envoy_admin_v3_tap_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_tap_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_tap_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_admin_v3_tap_proto_rawDescData) + }) + return file_envoy_admin_v3_tap_proto_rawDescData +} + +var file_envoy_admin_v3_tap_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_tap_proto_goTypes = []interface{}{ + (*TapRequest)(nil), // 0: envoy.admin.v3.TapRequest + (*v3.TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig +} +var file_envoy_admin_v3_tap_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.TapRequest.tap_config:type_name -> envoy.config.tap.v3.TapConfig + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_tap_proto_init() } +func file_envoy_admin_v3_tap_proto_init() { + if File_envoy_admin_v3_tap_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_tap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TapRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_tap_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_tap_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_tap_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_tap_proto_msgTypes, + }.Build() + File_envoy_admin_v3_tap_proto = out.File + file_envoy_admin_v3_tap_proto_rawDesc = nil + file_envoy_admin_v3_tap_proto_goTypes = nil + file_envoy_admin_v3_tap_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go new file mode 100644 index 000000000..d524f2aef --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go @@ -0,0 +1,187 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TapRequest with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TapRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TapRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TapRequestMultiError, or +// nil if none found. 
+func (m *TapRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *TapRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetConfigId()) < 1 { + err := TapRequestValidationError{ + field: "ConfigId", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTapConfig() == nil { + err := TapRequestValidationError{ + field: "TapConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTapConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTapConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TapRequestMultiError(errors) + } + + return nil +} + +// TapRequestMultiError is an error wrapping multiple validation errors +// returned by TapRequest.ValidateAll() if the designated constraints aren't met. +type TapRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TapRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TapRequestMultiError) AllErrors() []error { return m } + +// TapRequestValidationError is the validation error returned by +// TapRequest.Validate if the designated constraints aren't met. +type TapRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TapRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TapRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TapRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TapRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TapRequestValidationError) ErrorName() string { return "TapRequestValidationError" } + +// Error satisfies the builtin error interface +func (e TapRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTapRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TapRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TapRequestValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go new file mode 100644 index 000000000..4524bfb4f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go @@ -0,0 +1,106 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TapConfig != nil { + if vtmsg, ok := interface{}(m.TapConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ConfigId) > 0 { + i -= len(m.ConfigId) + copy(dAtA[i:], m.ConfigId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConfigId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapConfig != nil { + if size, ok := interface{}(m.TapConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go new file mode 100644 index 000000000..258fcfe2f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/annotations/deprecation.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 189503207, + Name: "envoy.annotations.disallowed_by_default", + Tag: "varint,189503207,opt,name=disallowed_by_default", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 157299826, + Name: "envoy.annotations.deprecated_at_minor_version", + Tag: "bytes,157299826,opt,name=deprecated_at_minor_version", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70100853, + Name: "envoy.annotations.disallowed_by_default_enum", + Tag: "varint,70100853,opt,name=disallowed_by_default_enum", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 181198657, + Name: "envoy.annotations.deprecated_at_minor_version_enum", + Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum", + Filename: "envoy/annotations/deprecation.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional bool disallowed_by_default = 189503207; + E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0] + // The API major and minor version on which the field was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). + // + // optional string deprecated_at_minor_version = 157299826; + E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional bool disallowed_by_default_enum = 70100853; + E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2] + // The API major and minor version on which the enum value was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). 
+ // + // optional string deprecated_at_minor_version_enum = 181198657; + E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3] +) + +var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor + +var file_envoy_annotations_deprecation_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69, + 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2, + 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x75, 0x6d, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions + (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions +} +var file_envoy_annotations_deprecation_proto_depIdxs = []int32{ + 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions + 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions + 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions + 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_annotations_deprecation_proto_init() } +func file_envoy_annotations_deprecation_proto_init() { + if File_envoy_annotations_deprecation_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_envoy_annotations_deprecation_proto_goTypes, + DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs, + ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes, + }.Build() + File_envoy_annotations_deprecation_proto = out.File + file_envoy_annotations_deprecation_proto_rawDesc = nil + file_envoy_annotations_deprecation_proto_goTypes = nil + file_envoy_annotations_deprecation_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go new file mode 100644 index 000000000..be58aa524 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/annotations/deprecation.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go new file mode 100644 index 000000000..828c87c5e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/annotations/resource.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource + // type. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *ResourceAnnotation) Reset() { + *x = ResourceAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_annotations_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceAnnotation) ProtoMessage() {} + +func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_annotations_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead. +func (*ResourceAnnotation) Descriptor() ([]byte, []int) { + return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceAnnotation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*ResourceAnnotation)(nil), + Field: 265073217, + Name: "envoy.annotations.resource", + Tag: "bytes,265073217,opt,name=resource", + Filename: "envoy/annotations/resource.proto", + }, +} + +// Extension fields to descriptorpb.ServiceOptions. 
+var ( + // optional envoy.annotations.ResourceAnnotation resource = 265073217; + E_Resource = &file_envoy_annotations_resource_proto_extTypes[0] +) + +var File_envoy_annotations_resource_proto protoreflect.FileDescriptor + +var file_envoy_annotations_resource_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, + 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_annotations_resource_proto_rawDescOnce sync.Once + file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc +) + +func file_envoy_annotations_resource_proto_rawDescGZIP() []byte { + file_envoy_annotations_resource_proto_rawDescOnce.Do(func() { + file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData) + }) + return file_envoy_annotations_resource_proto_rawDescData +} + +var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_annotations_resource_proto_goTypes = []interface{}{ + (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation + (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions +} +var file_envoy_annotations_resource_proto_depIdxs = []int32{ + 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions + 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_annotations_resource_proto_init() } +func file_envoy_annotations_resource_proto_init() { + 
if File_envoy_annotations_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_annotations_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_envoy_annotations_resource_proto_goTypes, + DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs, + MessageInfos: file_envoy_annotations_resource_proto_msgTypes, + ExtensionInfos: file_envoy_annotations_resource_proto_extTypes, + }.Build() + File_envoy_annotations_resource_proto = out.File + file_envoy_annotations_resource_proto_rawDesc = nil + file_envoy_annotations_resource_proto_goTypes = nil + file_envoy_annotations_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go new file mode 100644 index 000000000..2929a5813 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go @@ -0,0 +1,141 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/annotations/resource.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceAnnotationMultiError, or nil if none found. +func (m *ResourceAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Type + + if len(errors) > 0 { + return ResourceAnnotationMultiError(errors) + } + + return nil +} + +// ResourceAnnotationMultiError is an error wrapping multiple validation errors +// returned by ResourceAnnotation.ValidateAll() if the designated constraints +// aren't met. +type ResourceAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourceAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceAnnotationMultiError) AllErrors() []error { return m } + +// ResourceAnnotationValidationError is the validation error returned by +// ResourceAnnotation.Validate if the designated constraints aren't met. +type ResourceAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceAnnotationValidationError) ErrorName() string { + return "ResourceAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceAnnotationValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go new file mode 100644 index 000000000..324cb0916 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go @@ -0,0 +1,73 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/annotations/resource.proto + +package annotations + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceAnnotation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAnnotation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceAnnotation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceAnnotation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go new file mode 100644 index 000000000..34996d497 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go @@ -0,0 +1,1924 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ComparisonFilter_Op int32 + +const ( + // = + ComparisonFilter_EQ ComparisonFilter_Op = 0 + // >= + ComparisonFilter_GE ComparisonFilter_Op = 1 + // <= + ComparisonFilter_LE ComparisonFilter_Op = 2 +) + +// Enum value maps for ComparisonFilter_Op. 
+var ( + ComparisonFilter_Op_name = map[int32]string{ + 0: "EQ", + 1: "GE", + 2: "LE", + } + ComparisonFilter_Op_value = map[string]int32{ + "EQ": 0, + "GE": 1, + "LE": 2, + } +) + +func (x ComparisonFilter_Op) Enum() *ComparisonFilter_Op { + p := new(ComparisonFilter_Op) + *p = x + return p +} + +func (x ComparisonFilter_Op) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ComparisonFilter_Op) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor() +} + +func (ComparisonFilter_Op) Type() protoreflect.EnumType { + return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0] +} + +func (x ComparisonFilter_Op) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ComparisonFilter_Op.Descriptor instead. +func (ComparisonFilter_Op) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2, 0} +} + +type GrpcStatusFilter_Status int32 + +const ( + GrpcStatusFilter_OK GrpcStatusFilter_Status = 0 + GrpcStatusFilter_CANCELED GrpcStatusFilter_Status = 1 + GrpcStatusFilter_UNKNOWN GrpcStatusFilter_Status = 2 + GrpcStatusFilter_INVALID_ARGUMENT GrpcStatusFilter_Status = 3 + GrpcStatusFilter_DEADLINE_EXCEEDED GrpcStatusFilter_Status = 4 + GrpcStatusFilter_NOT_FOUND GrpcStatusFilter_Status = 5 + GrpcStatusFilter_ALREADY_EXISTS GrpcStatusFilter_Status = 6 + GrpcStatusFilter_PERMISSION_DENIED GrpcStatusFilter_Status = 7 + GrpcStatusFilter_RESOURCE_EXHAUSTED GrpcStatusFilter_Status = 8 + GrpcStatusFilter_FAILED_PRECONDITION GrpcStatusFilter_Status = 9 + GrpcStatusFilter_ABORTED GrpcStatusFilter_Status = 10 + GrpcStatusFilter_OUT_OF_RANGE GrpcStatusFilter_Status = 11 + GrpcStatusFilter_UNIMPLEMENTED GrpcStatusFilter_Status = 12 + GrpcStatusFilter_INTERNAL GrpcStatusFilter_Status = 13 + GrpcStatusFilter_UNAVAILABLE GrpcStatusFilter_Status = 14 + GrpcStatusFilter_DATA_LOSS GrpcStatusFilter_Status = 15 + GrpcStatusFilter_UNAUTHENTICATED GrpcStatusFilter_Status = 16 +) + +// Enum value maps for GrpcStatusFilter_Status. 
+var ( + GrpcStatusFilter_Status_name = map[int32]string{ + 0: "OK", + 1: "CANCELED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + 16: "UNAUTHENTICATED", + } + GrpcStatusFilter_Status_value = map[string]int32{ + "OK": 0, + "CANCELED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + "UNAUTHENTICATED": 16, + } +) + +func (x GrpcStatusFilter_Status) Enum() *GrpcStatusFilter_Status { + p := new(GrpcStatusFilter_Status) + *p = x + return p +} + +func (x GrpcStatusFilter_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcStatusFilter_Status) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor() +} + +func (GrpcStatusFilter_Status) Type() protoreflect.EnumType { + return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1] +} + +func (x GrpcStatusFilter_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcStatusFilter_Status.Descriptor instead. +func (GrpcStatusFilter_Status) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12, 0} +} + +type AccessLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the access log extension configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Filter which is used to determine if the access log needs to be written. + Filter *AccessLogFilter `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Custom configuration that must be set according to the access logger extension being instantiated. + // [#extension-category: envoy.access_loggers] + // + // Types that are assignable to ConfigType: + // + // *AccessLog_TypedConfig + ConfigType isAccessLog_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *AccessLog) Reset() { + *x = AccessLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLog) ProtoMessage() {} + +func (x *AccessLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. 
+func (*AccessLog) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *AccessLog) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AccessLog) GetFilter() *AccessLogFilter { + if x != nil { + return x.Filter + } + return nil +} + +func (m *AccessLog) GetConfigType() isAccessLog_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *AccessLog) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isAccessLog_ConfigType interface { + isAccessLog_ConfigType() +} + +type AccessLog_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {} + +// [#next-free-field: 14] +type AccessLogFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to FilterSpecifier: + // + // *AccessLogFilter_StatusCodeFilter + // *AccessLogFilter_DurationFilter + // *AccessLogFilter_NotHealthCheckFilter + // *AccessLogFilter_TraceableFilter + // *AccessLogFilter_RuntimeFilter + // *AccessLogFilter_AndFilter + // *AccessLogFilter_OrFilter + // *AccessLogFilter_HeaderFilter + // *AccessLogFilter_ResponseFlagFilter + // *AccessLogFilter_GrpcStatusFilter + // *AccessLogFilter_ExtensionFilter + // *AccessLogFilter_MetadataFilter + // *AccessLogFilter_LogTypeFilter + FilterSpecifier isAccessLogFilter_FilterSpecifier `protobuf_oneof:"filter_specifier"` +} + +func (x *AccessLogFilter) Reset() { + *x = AccessLogFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLogFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLogFilter) ProtoMessage() {} + +func (x *AccessLogFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLogFilter.ProtoReflect.Descriptor instead. 
+func (*AccessLogFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (m *AccessLogFilter) GetFilterSpecifier() isAccessLogFilter_FilterSpecifier { + if m != nil { + return m.FilterSpecifier + } + return nil +} + +func (x *AccessLogFilter) GetStatusCodeFilter() *StatusCodeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_StatusCodeFilter); ok { + return x.StatusCodeFilter + } + return nil +} + +func (x *AccessLogFilter) GetDurationFilter() *DurationFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_DurationFilter); ok { + return x.DurationFilter + } + return nil +} + +func (x *AccessLogFilter) GetNotHealthCheckFilter() *NotHealthCheckFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_NotHealthCheckFilter); ok { + return x.NotHealthCheckFilter + } + return nil +} + +func (x *AccessLogFilter) GetTraceableFilter() *TraceableFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_TraceableFilter); ok { + return x.TraceableFilter + } + return nil +} + +func (x *AccessLogFilter) GetRuntimeFilter() *RuntimeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_RuntimeFilter); ok { + return x.RuntimeFilter + } + return nil +} + +func (x *AccessLogFilter) GetAndFilter() *AndFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_AndFilter); ok { + return x.AndFilter + } + return nil +} + +func (x *AccessLogFilter) GetOrFilter() *OrFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_OrFilter); ok { + return x.OrFilter + } + return nil +} + +func (x *AccessLogFilter) GetHeaderFilter() *HeaderFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_HeaderFilter); ok { + return x.HeaderFilter + } + return nil +} + +func (x *AccessLogFilter) GetResponseFlagFilter() *ResponseFlagFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ResponseFlagFilter); ok { + return x.ResponseFlagFilter + } + return nil +} + +func (x *AccessLogFilter) GetGrpcStatusFilter() *GrpcStatusFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_GrpcStatusFilter); ok { + return x.GrpcStatusFilter + } + return nil +} + +func (x *AccessLogFilter) GetExtensionFilter() *ExtensionFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ExtensionFilter); ok { + return x.ExtensionFilter + } + return nil +} + +func (x *AccessLogFilter) GetMetadataFilter() *MetadataFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_MetadataFilter); ok { + return x.MetadataFilter + } + return nil +} + +func (x *AccessLogFilter) GetLogTypeFilter() *LogTypeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_LogTypeFilter); ok { + return x.LogTypeFilter + } + return nil +} + +type isAccessLogFilter_FilterSpecifier interface { + isAccessLogFilter_FilterSpecifier() +} + +type AccessLogFilter_StatusCodeFilter struct { + // Status code filter. + StatusCodeFilter *StatusCodeFilter `protobuf:"bytes,1,opt,name=status_code_filter,json=statusCodeFilter,proto3,oneof"` +} + +type AccessLogFilter_DurationFilter struct { + // Duration filter. + DurationFilter *DurationFilter `protobuf:"bytes,2,opt,name=duration_filter,json=durationFilter,proto3,oneof"` +} + +type AccessLogFilter_NotHealthCheckFilter struct { + // Not health check filter. + NotHealthCheckFilter *NotHealthCheckFilter `protobuf:"bytes,3,opt,name=not_health_check_filter,json=notHealthCheckFilter,proto3,oneof"` +} + +type AccessLogFilter_TraceableFilter struct { + // Traceable filter. 
+ TraceableFilter *TraceableFilter `protobuf:"bytes,4,opt,name=traceable_filter,json=traceableFilter,proto3,oneof"` +} + +type AccessLogFilter_RuntimeFilter struct { + // Runtime filter. + RuntimeFilter *RuntimeFilter `protobuf:"bytes,5,opt,name=runtime_filter,json=runtimeFilter,proto3,oneof"` +} + +type AccessLogFilter_AndFilter struct { + // And filter. + AndFilter *AndFilter `protobuf:"bytes,6,opt,name=and_filter,json=andFilter,proto3,oneof"` +} + +type AccessLogFilter_OrFilter struct { + // Or filter. + OrFilter *OrFilter `protobuf:"bytes,7,opt,name=or_filter,json=orFilter,proto3,oneof"` +} + +type AccessLogFilter_HeaderFilter struct { + // Header filter. + HeaderFilter *HeaderFilter `protobuf:"bytes,8,opt,name=header_filter,json=headerFilter,proto3,oneof"` +} + +type AccessLogFilter_ResponseFlagFilter struct { + // Response flag filter. + ResponseFlagFilter *ResponseFlagFilter `protobuf:"bytes,9,opt,name=response_flag_filter,json=responseFlagFilter,proto3,oneof"` +} + +type AccessLogFilter_GrpcStatusFilter struct { + // gRPC status filter. + GrpcStatusFilter *GrpcStatusFilter `protobuf:"bytes,10,opt,name=grpc_status_filter,json=grpcStatusFilter,proto3,oneof"` +} + +type AccessLogFilter_ExtensionFilter struct { + // Extension filter. + // [#extension-category: envoy.access_loggers.extension_filters] + ExtensionFilter *ExtensionFilter `protobuf:"bytes,11,opt,name=extension_filter,json=extensionFilter,proto3,oneof"` +} + +type AccessLogFilter_MetadataFilter struct { + // Metadata Filter + MetadataFilter *MetadataFilter `protobuf:"bytes,12,opt,name=metadata_filter,json=metadataFilter,proto3,oneof"` +} + +type AccessLogFilter_LogTypeFilter struct { + // Log Type Filter + LogTypeFilter *LogTypeFilter `protobuf:"bytes,13,opt,name=log_type_filter,json=logTypeFilter,proto3,oneof"` +} + +func (*AccessLogFilter_StatusCodeFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_DurationFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_NotHealthCheckFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_TraceableFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_RuntimeFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_AndFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_OrFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_HeaderFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_ResponseFlagFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_GrpcStatusFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_ExtensionFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_MetadataFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_LogTypeFilter) isAccessLogFilter_FilterSpecifier() {} + +// Filter on an integer comparison. +type ComparisonFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison operator. + Op ComparisonFilter_Op `protobuf:"varint,1,opt,name=op,proto3,enum=envoy.config.accesslog.v3.ComparisonFilter_Op" json:"op,omitempty"` + // Value to compare against. 
+ Value *v3.RuntimeUInt32 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ComparisonFilter) Reset() { + *x = ComparisonFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComparisonFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComparisonFilter) ProtoMessage() {} + +func (x *ComparisonFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComparisonFilter.ProtoReflect.Descriptor instead. +func (*ComparisonFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *ComparisonFilter) GetOp() ComparisonFilter_Op { + if x != nil { + return x.Op + } + return ComparisonFilter_EQ +} + +func (x *ComparisonFilter) GetValue() *v3.RuntimeUInt32 { + if x != nil { + return x.Value + } + return nil +} + +// Filters on HTTP response/status code. +type StatusCodeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison. + Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"` +} + +func (x *StatusCodeFilter) Reset() { + *x = StatusCodeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusCodeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusCodeFilter) ProtoMessage() {} + +func (x *StatusCodeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusCodeFilter.ProtoReflect.Descriptor instead. +func (*StatusCodeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusCodeFilter) GetComparison() *ComparisonFilter { + if x != nil { + return x.Comparison + } + return nil +} + +// Filters based on the duration of the request or stream, in milliseconds. +// For end of stream access logs, the total duration of the stream will be used. +// For :ref:`periodic access logs`, +// the duration of the stream at the time of log recording will be used. +type DurationFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison. 
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"` +} + +func (x *DurationFilter) Reset() { + *x = DurationFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurationFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurationFilter) ProtoMessage() {} + +func (x *DurationFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurationFilter.ProtoReflect.Descriptor instead. +func (*DurationFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *DurationFilter) GetComparison() *ComparisonFilter { + if x != nil { + return x.Comparison + } + return nil +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +type NotHealthCheckFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotHealthCheckFilter) Reset() { + *x = NotHealthCheckFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotHealthCheckFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotHealthCheckFilter) ProtoMessage() {} + +func (x *NotHealthCheckFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotHealthCheckFilter.ProtoReflect.Descriptor instead. +func (*NotHealthCheckFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5} +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +type TraceableFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *TraceableFilter) Reset() { + *x = TraceableFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceableFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceableFilter) ProtoMessage() {} + +func (x *TraceableFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceableFilter.ProtoReflect.Descriptor instead. 
+func (*TraceableFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6} +} + +// Filters for random sampling of requests. +type RuntimeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Runtime key to get an optional overridden numerator for use in the + // “percent_sampled“ field. If found in runtime, this value will replace the + // default numerator. + RuntimeKey string `protobuf:"bytes,1,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. + PercentSampled *v31.FractionalPercent `protobuf:"bytes,2,opt,name=percent_sampled,json=percentSampled,proto3" json:"percent_sampled,omitempty"` + // By default, sampling pivots on the header + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or “use_independent_randomness“ is set to true, the filter will + // randomly sample based on the runtime key value alone. + // “use_independent_randomness“ can be used for logging kill switches within + // complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). + UseIndependentRandomness bool `protobuf:"varint,3,opt,name=use_independent_randomness,json=useIndependentRandomness,proto3" json:"use_independent_randomness,omitempty"` +} + +func (x *RuntimeFilter) Reset() { + *x = RuntimeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFilter) ProtoMessage() {} + +func (x *RuntimeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFilter.ProtoReflect.Descriptor instead. +func (*RuntimeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeFilter) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +func (x *RuntimeFilter) GetPercentSampled() *v31.FractionalPercent { + if x != nil { + return x.PercentSampled + } + return nil +} + +func (x *RuntimeFilter) GetUseIndependentRandomness() bool { + if x != nil { + return x.UseIndependentRandomness + } + return false +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. 
+type AndFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []*AccessLogFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *AndFilter) Reset() { + *x = AndFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AndFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AndFilter) ProtoMessage() {} + +func (x *AndFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AndFilter.ProtoReflect.Descriptor instead. +func (*AndFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{8} +} + +func (x *AndFilter) GetFilters() []*AccessLogFilter { + if x != nil { + return x.Filters + } + return nil +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +type OrFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []*AccessLogFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *OrFilter) Reset() { + *x = OrFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrFilter) ProtoMessage() {} + +func (x *OrFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrFilter.ProtoReflect.Descriptor instead. +func (*OrFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{9} +} + +func (x *OrFilter) GetFilters() []*AccessLogFilter { + if x != nil { + return x.Filters + } + return nil +} + +// Filters requests based on the presence or value of a request header. +type HeaderFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. 
+ Header *v32.HeaderMatcher `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` +} + +func (x *HeaderFilter) Reset() { + *x = HeaderFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderFilter) ProtoMessage() {} + +func (x *HeaderFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderFilter.ProtoReflect.Descriptor instead. +func (*HeaderFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{10} +} + +func (x *HeaderFilter) GetHeader() *v32.HeaderMatcher { + if x != nil { + return x.Header + } + return nil +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter +// :ref:`documentation`. +type ResponseFlagFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. + Flags []string `protobuf:"bytes,1,rep,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *ResponseFlagFilter) Reset() { + *x = ResponseFlagFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlagFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlagFilter) ProtoMessage() {} + +func (x *ResponseFlagFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlagFilter.ProtoReflect.Descriptor instead. +func (*ResponseFlagFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{11} +} + +func (x *ResponseFlagFilter) GetFlags() []string { + if x != nil { + return x.Flags + } + return nil +} + +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. +type GrpcStatusFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Logs only responses that have any one of the gRPC statuses in this field. + Statuses []GrpcStatusFilter_Status `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.accesslog.v3.GrpcStatusFilter_Status" json:"statuses,omitempty"` + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. 
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"` +} + +func (x *GrpcStatusFilter) Reset() { + *x = GrpcStatusFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcStatusFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcStatusFilter) ProtoMessage() {} + +func (x *GrpcStatusFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcStatusFilter.ProtoReflect.Descriptor instead. +func (*GrpcStatusFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12} +} + +func (x *GrpcStatusFilter) GetStatuses() []GrpcStatusFilter_Status { + if x != nil { + return x.Statuses + } + return nil +} + +func (x *GrpcStatusFilter) GetExclude() bool { + if x != nil { + return x.Exclude + } + return false +} + +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +type MetadataFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + Matcher *v33.MetadataMatcher `protobuf:"bytes,1,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + MatchIfKeyNotFound *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"` +} + +func (x *MetadataFilter) Reset() { + *x = MetadataFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataFilter) ProtoMessage() {} + +func (x *MetadataFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataFilter.ProtoReflect.Descriptor instead. 
+func (*MetadataFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{13} +} + +func (x *MetadataFilter) GetMatcher() *v33.MetadataMatcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrapperspb.BoolValue { + if x != nil { + return x.MatchIfKeyNotFound + } + return nil +} + +// Filters based on access log type. +type LogTypeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Logs only records which their type is one of the types defined in this field. + Types []v34.AccessLogType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"types,omitempty"` + // If this field is set to true, the filter will instead block all records + // with a access log type in types field, and allow all other records. + Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"` +} + +func (x *LogTypeFilter) Reset() { + *x = LogTypeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogTypeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogTypeFilter) ProtoMessage() {} + +func (x *LogTypeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogTypeFilter.ProtoReflect.Descriptor instead. +func (*LogTypeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14} +} + +func (x *LogTypeFilter) GetTypes() []v34.AccessLogType { + if x != nil { + return x.Types + } + return nil +} + +func (x *LogTypeFilter) GetExclude() bool { + if x != nil { + return x.Exclude + } + return false +} + +// Extension filter is statically registered at runtime. +type ExtensionFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Custom configuration that depends on the filter being instantiated. 
+ // + // Types that are assignable to ConfigType: + // + // *ExtensionFilter_TypedConfig + ConfigType isExtensionFilter_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *ExtensionFilter) Reset() { + *x = ExtensionFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionFilter) ProtoMessage() {} + +func (x *ExtensionFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionFilter.ProtoReflect.Descriptor instead. +func (*ExtensionFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{15} +} + +func (x *ExtensionFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ExtensionFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isExtensionFilter_ConfigType interface { + isExtensionFilter_ConfigType() +} + +type ExtensionFilter_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {} + +var File_envoy_config_accesslog_v3_accesslog_proto protoreflect.FileDescriptor + +var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, + 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, + 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0f, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x09, + 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, + 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 
0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, + 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x52, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x17, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x02, 0x6f, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, 0x4f, 0x70, 0x12, + 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x01, 0x12, + 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x38, 0x9a, + 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, + 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x0d, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, + 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x70, + 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, 0x6e, 0x64, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, + 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x4f, 0x72, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 
0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa7, 0x01, + 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x90, 0x01, + 0xfa, 0x42, 0x8c, 0x01, 0x92, 0x01, 0x88, 0x01, 0x22, 0x85, 0x01, 0x72, 0x82, 0x01, 0x52, 0x02, + 0x4c, 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, + 0x55, 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, + 0x4e, 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, + 0x55, 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, + 0x55, 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, + 0x52, 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e, + 0x46, 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, + 0x52, 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x02, 0x44, 0x52, + 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, + 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 
0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, + 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, + 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, + 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, + 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, + 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66, + 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88, + 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 
0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, + 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce sync.Once + file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = file_envoy_config_accesslog_v3_accesslog_proto_rawDesc +) + +func file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { + file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() { + file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_accesslog_v3_accesslog_proto_rawDescData) + }) + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescData +} + +var file_envoy_config_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{ + (ComparisonFilter_Op)(0), // 0: 
envoy.config.accesslog.v3.ComparisonFilter.Op + (GrpcStatusFilter_Status)(0), // 1: envoy.config.accesslog.v3.GrpcStatusFilter.Status + (*AccessLog)(nil), // 2: envoy.config.accesslog.v3.AccessLog + (*AccessLogFilter)(nil), // 3: envoy.config.accesslog.v3.AccessLogFilter + (*ComparisonFilter)(nil), // 4: envoy.config.accesslog.v3.ComparisonFilter + (*StatusCodeFilter)(nil), // 5: envoy.config.accesslog.v3.StatusCodeFilter + (*DurationFilter)(nil), // 6: envoy.config.accesslog.v3.DurationFilter + (*NotHealthCheckFilter)(nil), // 7: envoy.config.accesslog.v3.NotHealthCheckFilter + (*TraceableFilter)(nil), // 8: envoy.config.accesslog.v3.TraceableFilter + (*RuntimeFilter)(nil), // 9: envoy.config.accesslog.v3.RuntimeFilter + (*AndFilter)(nil), // 10: envoy.config.accesslog.v3.AndFilter + (*OrFilter)(nil), // 11: envoy.config.accesslog.v3.OrFilter + (*HeaderFilter)(nil), // 12: envoy.config.accesslog.v3.HeaderFilter + (*ResponseFlagFilter)(nil), // 13: envoy.config.accesslog.v3.ResponseFlagFilter + (*GrpcStatusFilter)(nil), // 14: envoy.config.accesslog.v3.GrpcStatusFilter + (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter + (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter + (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter + (*anypb.Any)(nil), // 18: google.protobuf.Any + (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32 + (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent + (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher + (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher + (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue + (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType +} +var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{ + 3, // 0: envoy.config.accesslog.v3.AccessLog.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 18, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any + 5, // 2: envoy.config.accesslog.v3.AccessLogFilter.status_code_filter:type_name -> envoy.config.accesslog.v3.StatusCodeFilter + 6, // 3: envoy.config.accesslog.v3.AccessLogFilter.duration_filter:type_name -> envoy.config.accesslog.v3.DurationFilter + 7, // 4: envoy.config.accesslog.v3.AccessLogFilter.not_health_check_filter:type_name -> envoy.config.accesslog.v3.NotHealthCheckFilter + 8, // 5: envoy.config.accesslog.v3.AccessLogFilter.traceable_filter:type_name -> envoy.config.accesslog.v3.TraceableFilter + 9, // 6: envoy.config.accesslog.v3.AccessLogFilter.runtime_filter:type_name -> envoy.config.accesslog.v3.RuntimeFilter + 10, // 7: envoy.config.accesslog.v3.AccessLogFilter.and_filter:type_name -> envoy.config.accesslog.v3.AndFilter + 11, // 8: envoy.config.accesslog.v3.AccessLogFilter.or_filter:type_name -> envoy.config.accesslog.v3.OrFilter + 12, // 9: envoy.config.accesslog.v3.AccessLogFilter.header_filter:type_name -> envoy.config.accesslog.v3.HeaderFilter + 13, // 10: envoy.config.accesslog.v3.AccessLogFilter.response_flag_filter:type_name -> envoy.config.accesslog.v3.ResponseFlagFilter + 14, // 11: envoy.config.accesslog.v3.AccessLogFilter.grpc_status_filter:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter + 17, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter + 15, // 13: envoy.config.accesslog.v3.AccessLogFilter.metadata_filter:type_name -> envoy.config.accesslog.v3.MetadataFilter 
+ 16, // 14: envoy.config.accesslog.v3.AccessLogFilter.log_type_filter:type_name -> envoy.config.accesslog.v3.LogTypeFilter + 0, // 15: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op + 19, // 16: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32 + 4, // 17: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 4, // 18: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 20, // 19: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent + 3, // 20: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 3, // 21: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 21, // 22: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher + 1, // 23: envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status + 22, // 24: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher + 23, // 25: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue + 24, // 26: envoy.config.accesslog.v3.LogTypeFilter.types:type_name -> envoy.data.accesslog.v3.AccessLogType + 18, // 27: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name +} + +func init() { file_envoy_config_accesslog_v3_accesslog_proto_init() } +func file_envoy_config_accesslog_v3_accesslog_proto_init() { + if File_envoy_config_accesslog_v3_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLogFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComparisonFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusCodeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurationFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotHealthCheckFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceableFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AndFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlagFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcStatusFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogTypeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AccessLog_TypedConfig)(nil), + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AccessLogFilter_StatusCodeFilter)(nil), + (*AccessLogFilter_DurationFilter)(nil), + (*AccessLogFilter_NotHealthCheckFilter)(nil), + (*AccessLogFilter_TraceableFilter)(nil), + (*AccessLogFilter_RuntimeFilter)(nil), + 
(*AccessLogFilter_AndFilter)(nil), + (*AccessLogFilter_OrFilter)(nil), + (*AccessLogFilter_HeaderFilter)(nil), + (*AccessLogFilter_ResponseFlagFilter)(nil), + (*AccessLogFilter_GrpcStatusFilter)(nil), + (*AccessLogFilter_ExtensionFilter)(nil), + (*AccessLogFilter_MetadataFilter)(nil), + (*AccessLogFilter_LogTypeFilter)(nil), + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*ExtensionFilter_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_accesslog_v3_accesslog_proto_rawDesc, + NumEnums: 2, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_accesslog_v3_accesslog_proto_goTypes, + DependencyIndexes: file_envoy_config_accesslog_v3_accesslog_proto_depIdxs, + EnumInfos: file_envoy_config_accesslog_v3_accesslog_proto_enumTypes, + MessageInfos: file_envoy_config_accesslog_v3_accesslog_proto_msgTypes, + }.Build() + File_envoy_config_accesslog_v3_accesslog_proto = out.File + file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = nil + file_envoy_config_accesslog_v3_accesslog_proto_goTypes = nil + file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go new file mode 100644 index 000000000..746f6f2c4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go @@ -0,0 +1,2773 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.AccessLogType(0) +) + +// Validate checks the field values on AccessLog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AccessLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AccessLogMultiError, or nil +// if none found. 
+func (m *AccessLog) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ConfigType.(type) { + case *AccessLog_TypedConfig: + if v == nil { + err := AccessLogValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return AccessLogMultiError(errors) + } + + return nil +} + +// AccessLogMultiError is an error wrapping multiple validation errors returned +// by AccessLog.ValidateAll() if the designated constraints aren't met. +type AccessLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogMultiError) AllErrors() []error { return m } + +// AccessLogValidationError is the validation error returned by +// AccessLog.Validate if the designated constraints aren't met. +type AccessLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AccessLogValidationError) ErrorName() string { return "AccessLogValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogValidationError{} + +// Validate checks the field values on AccessLogFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AccessLogFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLogFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AccessLogFilterMultiError, or nil if none found. +func (m *AccessLogFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLogFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofFilterSpecifierPresent := false + switch v := m.FilterSpecifier.(type) { + case *AccessLogFilter_StatusCodeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStatusCodeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatusCodeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_DurationFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDurationFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDurationFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_NotHealthCheckFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetNotHealthCheckFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotHealthCheckFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_TraceableFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetTraceableFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceableFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_RuntimeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRuntimeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_AndFilter: + if v == nil { + err := 
AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAndFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_OrFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetOrFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_HeaderFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_ResponseFlagFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetResponseFlagFilter()).(type) { + case interface{ ValidateAll() 
error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseFlagFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_GrpcStatusFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGrpcStatusFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcStatusFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_ExtensionFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetExtensionFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtensionFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_MetadataFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMetadataFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case 
interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_LogTypeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLogTypeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogTypeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFilterSpecifierPresent { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AccessLogFilterMultiError(errors) + } + + return nil +} + +// AccessLogFilterMultiError is an error wrapping multiple validation errors +// returned by AccessLogFilter.ValidateAll() if the designated constraints +// aren't met. +type AccessLogFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogFilterMultiError) AllErrors() []error { return m } + +// AccessLogFilterValidationError is the validation error returned by +// AccessLogFilter.Validate if the designated constraints aren't met. +type AccessLogFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
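
The generated pattern above pairs Validate, which stops at the first violation, with ValidateAll, which gathers every violation into an AccessLogFilterMultiError. A minimal usage sketch, illustrative only, assuming the usual go-control-plane import path for this generated package:

package main

import (
	"fmt"

	accesslogv3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
)

func main() {
	// FilterSpecifier is a required oneof, so an empty filter fails validation.
	f := &accesslogv3.AccessLogFilter{}

	// Validate returns only the first violation it finds.
	if err := f.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll collects every violation into an AccessLogFilterMultiError.
	if err := f.ValidateAll(); err != nil {
		if multi, ok := err.(accesslogv3.AccessLogFilterMultiError); ok {
			for _, e := range multi.AllErrors() {
				fmt.Println("violation:", e)
			}
		}
	}
}
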
+func (e AccessLogFilterValidationError) ErrorName() string { return "AccessLogFilterValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLogFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogFilterValidationError{} + +// Validate checks the field values on ComparisonFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ComparisonFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ComparisonFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ComparisonFilterMultiError, or nil if none found. +func (m *ComparisonFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ComparisonFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ComparisonFilter_Op_name[int32(m.GetOp())]; !ok { + err := ComparisonFilterValidationError{ + field: "Op", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetValue() == nil { + err := ComparisonFilterValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ComparisonFilterMultiError(errors) + } + + return nil +} + +// ComparisonFilterMultiError is an error wrapping multiple validation errors +// returned by ComparisonFilter.ValidateAll() if the designated constraints +// aren't met. +type ComparisonFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ComparisonFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ComparisonFilterMultiError) AllErrors() []error { return m } + +// ComparisonFilterValidationError is the validation error returned by +// ComparisonFilter.Validate if the designated constraints aren't met. 
+type ComparisonFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ComparisonFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ComparisonFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ComparisonFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ComparisonFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ComparisonFilterValidationError) ErrorName() string { return "ComparisonFilterValidationError" } + +// Error satisfies the builtin error interface +func (e ComparisonFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sComparisonFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ComparisonFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ComparisonFilterValidationError{} + +// Validate checks the field values on StatusCodeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusCodeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusCodeFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusCodeFilterMultiError, or nil if none found. +func (m *StatusCodeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusCodeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetComparison() == nil { + err := StatusCodeFilterValidationError{ + field: "Comparison", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetComparison()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StatusCodeFilterMultiError(errors) + } + + return nil +} + +// StatusCodeFilterMultiError is an error wrapping multiple validation errors +// returned by StatusCodeFilter.ValidateAll() if the designated constraints +// aren't met. +type StatusCodeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m StatusCodeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusCodeFilterMultiError) AllErrors() []error { return m } + +// StatusCodeFilterValidationError is the validation error returned by +// StatusCodeFilter.Validate if the designated constraints aren't met. +type StatusCodeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusCodeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusCodeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusCodeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusCodeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatusCodeFilterValidationError) ErrorName() string { return "StatusCodeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e StatusCodeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusCodeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusCodeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusCodeFilterValidationError{} + +// Validate checks the field values on DurationFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DurationFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DurationFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DurationFilterMultiError, +// or nil if none found. 
+func (m *DurationFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *DurationFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetComparison() == nil { + err := DurationFilterValidationError{ + field: "Comparison", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetComparison()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DurationFilterMultiError(errors) + } + + return nil +} + +// DurationFilterMultiError is an error wrapping multiple validation errors +// returned by DurationFilter.ValidateAll() if the designated constraints +// aren't met. +type DurationFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DurationFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DurationFilterMultiError) AllErrors() []error { return m } + +// DurationFilterValidationError is the validation error returned by +// DurationFilter.Validate if the designated constraints aren't met. +type DurationFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DurationFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DurationFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DurationFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DurationFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DurationFilterValidationError) ErrorName() string { return "DurationFilterValidationError" } + +// Error satisfies the builtin error interface +func (e DurationFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDurationFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DurationFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DurationFilterValidationError{} + +// Validate checks the field values on NotHealthCheckFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *NotHealthCheckFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NotHealthCheckFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NotHealthCheckFilterMultiError, or nil if none found. +func (m *NotHealthCheckFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *NotHealthCheckFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return NotHealthCheckFilterMultiError(errors) + } + + return nil +} + +// NotHealthCheckFilterMultiError is an error wrapping multiple validation +// errors returned by NotHealthCheckFilter.ValidateAll() if the designated +// constraints aren't met. +type NotHealthCheckFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NotHealthCheckFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NotHealthCheckFilterMultiError) AllErrors() []error { return m } + +// NotHealthCheckFilterValidationError is the validation error returned by +// NotHealthCheckFilter.Validate if the designated constraints aren't met. +type NotHealthCheckFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NotHealthCheckFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NotHealthCheckFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NotHealthCheckFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NotHealthCheckFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NotHealthCheckFilterValidationError) ErrorName() string { + return "NotHealthCheckFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e NotHealthCheckFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNotHealthCheckFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NotHealthCheckFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NotHealthCheckFilterValidationError{} + +// Validate checks the field values on TraceableFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TraceableFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TraceableFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TraceableFilterMultiError, or nil if none found. 
+func (m *TraceableFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *TraceableFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return TraceableFilterMultiError(errors) + } + + return nil +} + +// TraceableFilterMultiError is an error wrapping multiple validation errors +// returned by TraceableFilter.ValidateAll() if the designated constraints +// aren't met. +type TraceableFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TraceableFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TraceableFilterMultiError) AllErrors() []error { return m } + +// TraceableFilterValidationError is the validation error returned by +// TraceableFilter.Validate if the designated constraints aren't met. +type TraceableFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TraceableFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TraceableFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TraceableFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TraceableFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TraceableFilterValidationError) ErrorName() string { return "TraceableFilterValidationError" } + +// Error satisfies the builtin error interface +func (e TraceableFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTraceableFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TraceableFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TraceableFilterValidationError{} + +// Validate checks the field values on RuntimeFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeFilterMultiError, or +// nil if none found. 
+func (m *RuntimeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeFilterValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPercentSampled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentSampled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UseIndependentRandomness + + if len(errors) > 0 { + return RuntimeFilterMultiError(errors) + } + + return nil +} + +// RuntimeFilterMultiError is an error wrapping multiple validation errors +// returned by RuntimeFilter.ValidateAll() if the designated constraints +// aren't met. +type RuntimeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFilterMultiError) AllErrors() []error { return m } + +// RuntimeFilterValidationError is the validation error returned by +// RuntimeFilter.Validate if the designated constraints aren't met. +type RuntimeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFilterValidationError) ErrorName() string { return "RuntimeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFilterValidationError{} + +// Validate checks the field values on AndFilter with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AndFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AndFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AndFilterMultiError, or nil +// if none found. +func (m *AndFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *AndFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFilters()) < 2 { + err := AndFilterValidationError{ + field: "Filters", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return AndFilterMultiError(errors) + } + + return nil +} + +// AndFilterMultiError is an error wrapping multiple validation errors returned +// by AndFilter.ValidateAll() if the designated constraints aren't met. +type AndFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AndFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AndFilterMultiError) AllErrors() []error { return m } + +// AndFilterValidationError is the validation error returned by +// AndFilter.Validate if the designated constraints aren't met. +type AndFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AndFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AndFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AndFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AndFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
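
As generated above, AndFilter (and OrFilter below) requires at least two sub-filters before the per-item validation runs. A small sketch of that rule, under the same assumed accesslogv3 import as the earlier example:

// Illustrative only.
and := &accesslogv3.AndFilter{} // no sub-filters
if err := and.Validate(); err != nil {
	// Reports that Filters "value must contain at least 2 item(s)".
	fmt.Println(err)
}
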
+func (e AndFilterValidationError) ErrorName() string { return "AndFilterValidationError" } + +// Error satisfies the builtin error interface +func (e AndFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAndFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AndFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AndFilterValidationError{} + +// Validate checks the field values on OrFilter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrFilterMultiError, or nil +// if none found. +func (m *OrFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *OrFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFilters()) < 2 { + err := OrFilterValidationError{ + field: "Filters", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return OrFilterMultiError(errors) + } + + return nil +} + +// OrFilterMultiError is an error wrapping multiple validation errors returned +// by OrFilter.ValidateAll() if the designated constraints aren't met. +type OrFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrFilterMultiError) AllErrors() []error { return m } + +// OrFilterValidationError is the validation error returned by +// OrFilter.Validate if the designated constraints aren't met. +type OrFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OrFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrFilterValidationError) ErrorName() string { return "OrFilterValidationError" } + +// Error satisfies the builtin error interface +func (e OrFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrFilterValidationError{} + +// Validate checks the field values on HeaderFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderFilterMultiError, or +// nil if none found. +func (m *HeaderFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHeader() == nil { + err := HeaderFilterValidationError{ + field: "Header", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HeaderFilterMultiError(errors) + } + + return nil +} + +// HeaderFilterMultiError is an error wrapping multiple validation errors +// returned by HeaderFilter.ValidateAll() if the designated constraints aren't met. +type HeaderFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderFilterMultiError) AllErrors() []error { return m } + +// HeaderFilterValidationError is the validation error returned by +// HeaderFilter.Validate if the designated constraints aren't met. 
+type HeaderFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderFilterValidationError) ErrorName() string { return "HeaderFilterValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderFilterValidationError{} + +// Validate checks the field values on ResponseFlagFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResponseFlagFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlagFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResponseFlagFilterMultiError, or nil if none found. +func (m *ResponseFlagFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlagFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetFlags() { + _, _ = idx, item + + if _, ok := _ResponseFlagFilter_Flags_InLookup[item]; !ok { + err := ResponseFlagFilterValidationError{ + field: fmt.Sprintf("Flags[%v]", idx), + reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO DR]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ResponseFlagFilterMultiError(errors) + } + + return nil +} + +// ResponseFlagFilterMultiError is an error wrapping multiple validation errors +// returned by ResponseFlagFilter.ValidateAll() if the designated constraints +// aren't met. +type ResponseFlagFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlagFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlagFilterMultiError) AllErrors() []error { return m } + +// ResponseFlagFilterValidationError is the validation error returned by +// ResponseFlagFilter.Validate if the designated constraints aren't met. +type ResponseFlagFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlagFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ResponseFlagFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlagFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlagFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlagFilterValidationError) ErrorName() string { + return "ResponseFlagFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e ResponseFlagFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlagFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlagFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlagFilterValidationError{} + +var _ResponseFlagFilter_Flags_InLookup = map[string]struct{}{ + "LH": {}, + "UH": {}, + "UT": {}, + "LR": {}, + "UR": {}, + "UF": {}, + "UC": {}, + "UO": {}, + "NR": {}, + "DI": {}, + "FI": {}, + "RL": {}, + "UAEX": {}, + "RLSE": {}, + "DC": {}, + "URX": {}, + "SI": {}, + "IH": {}, + "DPE": {}, + "UMSDR": {}, + "RFCF": {}, + "NFCF": {}, + "DT": {}, + "UPE": {}, + "NC": {}, + "OM": {}, + "DF": {}, + "DO": {}, + "DR": {}, +} + +// Validate checks the field values on GrpcStatusFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GrpcStatusFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcStatusFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcStatusFilterMultiError, or nil if none found. +func (m *GrpcStatusFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcStatusFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatuses() { + _, _ = idx, item + + if _, ok := GrpcStatusFilter_Status_name[int32(item)]; !ok { + err := GrpcStatusFilterValidationError{ + field: fmt.Sprintf("Statuses[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for Exclude + + if len(errors) > 0 { + return GrpcStatusFilterMultiError(errors) + } + + return nil +} + +// GrpcStatusFilterMultiError is an error wrapping multiple validation errors +// returned by GrpcStatusFilter.ValidateAll() if the designated constraints +// aren't met. +type GrpcStatusFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcStatusFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcStatusFilterMultiError) AllErrors() []error { return m } + +// GrpcStatusFilterValidationError is the validation error returned by +// GrpcStatusFilter.Validate if the designated constraints aren't met. 
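
The generated lookup table above is how the Flags rule is enforced: each entry must be one of the allowed Envoy response-flag short codes. A sketch, again under the assumed accesslogv3 import; "BOGUS" is a made-up value used only to trigger the error:

// Illustrative only.
rf := &accesslogv3.ResponseFlagFilter{Flags: []string{"UH", "BOGUS"}}
if err := rf.Validate(); err != nil {
	// Flags[1] is rejected because it is not in the allowed list.
	fmt.Println(err)
}
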
+type GrpcStatusFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcStatusFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcStatusFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcStatusFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcStatusFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcStatusFilterValidationError) ErrorName() string { return "GrpcStatusFilterValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcStatusFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcStatusFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcStatusFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcStatusFilterValidationError{} + +// Validate checks the field values on MetadataFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MetadataFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataFilterMultiError, +// or nil if none found. 
+func (m *MetadataFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMatchIfKeyNotFound()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatchIfKeyNotFound()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MetadataFilterMultiError(errors) + } + + return nil +} + +// MetadataFilterMultiError is an error wrapping multiple validation errors +// returned by MetadataFilter.ValidateAll() if the designated constraints +// aren't met. +type MetadataFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataFilterMultiError) AllErrors() []error { return m } + +// MetadataFilterValidationError is the validation error returned by +// MetadataFilter.Validate if the designated constraints aren't met. +type MetadataFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataFilterValidationError) ErrorName() string { return "MetadataFilterValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataFilterValidationError{} + +// Validate checks the field values on LogTypeFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogTypeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogTypeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogTypeFilterMultiError, or +// nil if none found. +func (m *LogTypeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *LogTypeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetTypes() { + _, _ = idx, item + + if _, ok := v3.AccessLogType_name[int32(item)]; !ok { + err := LogTypeFilterValidationError{ + field: fmt.Sprintf("Types[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for Exclude + + if len(errors) > 0 { + return LogTypeFilterMultiError(errors) + } + + return nil +} + +// LogTypeFilterMultiError is an error wrapping multiple validation errors +// returned by LogTypeFilter.ValidateAll() if the designated constraints +// aren't met. +type LogTypeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogTypeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LogTypeFilterMultiError) AllErrors() []error { return m } + +// LogTypeFilterValidationError is the validation error returned by +// LogTypeFilter.Validate if the designated constraints aren't met. +type LogTypeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LogTypeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogTypeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogTypeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogTypeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LogTypeFilterValidationError) ErrorName() string { return "LogTypeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e LogTypeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogTypeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogTypeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogTypeFilterValidationError{} + +// Validate checks the field values on ExtensionFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ExtensionFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtensionFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtensionFilterMultiError, or nil if none found. +func (m *ExtensionFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtensionFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *ExtensionFilter_TypedConfig: + if v == nil { + err := ExtensionFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ExtensionFilterMultiError(errors) + } + + return nil +} + +// ExtensionFilterMultiError is an error wrapping multiple validation errors +// returned by ExtensionFilter.ValidateAll() if the designated constraints +// aren't met. +type ExtensionFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionFilterMultiError) AllErrors() []error { return m } + +// ExtensionFilterValidationError is the validation error returned by +// ExtensionFilter.Validate if the designated constraints aren't met. +type ExtensionFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ExtensionFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtensionFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtensionFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtensionFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ExtensionFilterValidationError) ErrorName() string { return "ExtensionFilterValidationError" } + +// Error satisfies the builtin error interface +func (e ExtensionFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtensionFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtensionFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtensionFilterValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go new file mode 100644 index 000000000..e75bf014a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go @@ -0,0 +1,1751 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AccessLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*AccessLog_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Filter != nil { + size, err := m.Filter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AccessLog_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_LogTypeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_MetadataFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ExtensionFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_GrpcStatusFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ResponseFlagFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_HeaderFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_OrFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_AndFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_RuntimeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_TraceableFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_NotHealthCheckFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_DurationFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_StatusCodeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusCodeFilter != nil { + size, err := m.StatusCodeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DurationFilter != nil { + size, err := m.DurationFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotHealthCheckFilter != nil { + size, err := m.NotHealthCheckFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TraceableFilter != nil { + size, err := m.TraceableFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RuntimeFilter != nil { + size, err := m.RuntimeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndFilter != nil { + size, err := m.AndFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrFilter != nil { + size, err := m.OrFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderFilter != nil { + size, err := m.HeaderFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseFlagFilter != nil { + size, err := m.ResponseFlagFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + 
dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcStatusFilter != nil { + size, err := m.GrpcStatusFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtensionFilter != nil { + size, err := m.ExtensionFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MetadataFilter != nil { + size, err := m.MetadataFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LogTypeFilter != nil { + size, err := m.LogTypeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *ComparisonFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComparisonFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ComparisonFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + if vtmsg, ok := interface{}(m.Value).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Value) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Op != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Op)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusCodeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DurationFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NotHealthCheckFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *TraceableFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m 
== nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseIndependentRandomness { + i-- + if m.UseIndependentRandomness { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.PercentSampled != nil { + if vtmsg, ok := interface{}(m.PercentSampled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PercentSampled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AndFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OrFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 
0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *HeaderFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlagFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Flags) > 0 { + for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Flags[iNdEx]) + copy(dAtA[i:], m.Flags[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Flags[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcStatusFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = 
uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchIfKeyNotFound != nil { + size, err := (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LogTypeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Types) > 0 { + var pksize2 int + for _, num := range m.Types { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Types { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ExtensionFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLog_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FilterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogFilter_StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusCodeFilter != nil { + l = m.StatusCodeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DurationFilter != nil { + l = m.DurationFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotHealthCheckFilter != nil { + l = m.NotHealthCheckFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceableFilter != nil { + l = m.TraceableFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RuntimeFilter != nil { + l = m.RuntimeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndFilter != nil { + l = m.AndFilter.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrFilter != nil { + l = m.OrFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderFilter != nil { + l = m.HeaderFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseFlagFilter != nil { + l = m.ResponseFlagFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcStatusFilter != nil { + l = m.GrpcStatusFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtensionFilter != nil { + l = m.ExtensionFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataFilter != nil { + l = m.MetadataFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogTypeFilter != nil { + l = m.LogTypeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ComparisonFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Op != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Op)) + } + if m.Value != nil { + if size, ok := interface{}(m.Value).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Value) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PercentSampled != nil { + if size, ok := interface{}(m.PercentSampled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PercentSampled) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseIndependentRandomness { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Flags) > 0 { + for _, s := range m.Flags { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MatchIfKeyNotFound != nil { + l = (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Types) > 0 { + l = 0 + for _, e := range m.Types { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go new file mode 100644 index 000000000..893acf21e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go @@ -0,0 +1,3310 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v34 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v37 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v36 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3" + v38 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. +// Within an event type, actions execute in the order they are configured. +// For KILL/MULTIKILL there is a default PANIC that will run after the +// registered actions and kills the process if it wasn't already killed. +// It might be useful to specify several debug actions, and possibly an +// alternate FATAL action. +type Watchdog_WatchdogAction_WatchdogEvent int32 + +const ( + Watchdog_WatchdogAction_UNKNOWN Watchdog_WatchdogAction_WatchdogEvent = 0 + Watchdog_WatchdogAction_KILL Watchdog_WatchdogAction_WatchdogEvent = 1 + Watchdog_WatchdogAction_MULTIKILL Watchdog_WatchdogAction_WatchdogEvent = 2 + Watchdog_WatchdogAction_MEGAMISS Watchdog_WatchdogAction_WatchdogEvent = 3 + Watchdog_WatchdogAction_MISS Watchdog_WatchdogAction_WatchdogEvent = 4 +) + +// Enum value maps for Watchdog_WatchdogAction_WatchdogEvent. 
+var ( + Watchdog_WatchdogAction_WatchdogEvent_name = map[int32]string{ + 0: "UNKNOWN", + 1: "KILL", + 2: "MULTIKILL", + 3: "MEGAMISS", + 4: "MISS", + } + Watchdog_WatchdogAction_WatchdogEvent_value = map[string]int32{ + "UNKNOWN": 0, + "KILL": 1, + "MULTIKILL": 2, + "MEGAMISS": 3, + "MISS": 4, + } +) + +func (x Watchdog_WatchdogAction_WatchdogEvent) Enum() *Watchdog_WatchdogAction_WatchdogEvent { + p := new(Watchdog_WatchdogAction_WatchdogEvent) + *p = x + return p +} + +func (x Watchdog_WatchdogAction_WatchdogEvent) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Watchdog_WatchdogAction_WatchdogEvent) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0].Descriptor() +} + +func (Watchdog_WatchdogAction_WatchdogEvent) Type() protoreflect.EnumType { + return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0] +} + +func (x Watchdog_WatchdogAction_WatchdogEvent) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Watchdog_WatchdogAction_WatchdogEvent.Descriptor instead. +func (Watchdog_WatchdogAction_WatchdogEvent) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0, 0} +} + +type CustomInlineHeader_InlineHeaderType int32 + +const ( + CustomInlineHeader_REQUEST_HEADER CustomInlineHeader_InlineHeaderType = 0 + CustomInlineHeader_REQUEST_TRAILER CustomInlineHeader_InlineHeaderType = 1 + CustomInlineHeader_RESPONSE_HEADER CustomInlineHeader_InlineHeaderType = 2 + CustomInlineHeader_RESPONSE_TRAILER CustomInlineHeader_InlineHeaderType = 3 +) + +// Enum value maps for CustomInlineHeader_InlineHeaderType. +var ( + CustomInlineHeader_InlineHeaderType_name = map[int32]string{ + 0: "REQUEST_HEADER", + 1: "REQUEST_TRAILER", + 2: "RESPONSE_HEADER", + 3: "RESPONSE_TRAILER", + } + CustomInlineHeader_InlineHeaderType_value = map[string]int32{ + "REQUEST_HEADER": 0, + "REQUEST_TRAILER": 1, + "RESPONSE_HEADER": 2, + "RESPONSE_TRAILER": 3, + } +) + +func (x CustomInlineHeader_InlineHeaderType) Enum() *CustomInlineHeader_InlineHeaderType { + p := new(CustomInlineHeader_InlineHeaderType) + *p = x + return p +} + +func (x CustomInlineHeader_InlineHeaderType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomInlineHeader_InlineHeaderType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1].Descriptor() +} + +func (CustomInlineHeader_InlineHeaderType) Type() protoreflect.EnumType { + return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1] +} + +func (x CustomInlineHeader_InlineHeaderType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomInlineHeader_InlineHeaderType.Descriptor instead. +func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9, 0} +} + +// Bootstrap :ref:`configuration overview `. +// [#next-free-field: 42] +type Bootstrap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). 
+ Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of :ref:`Node ` field names + // that will be included in the context parameters of the effective + // xdstp:// URL that is sent in a discovery request when resource + // locators are used for LDS/CDS. Any non-string field will have its JSON + // encoding set as the context parameter value, with the exception of + // metadata, which will be flattened (see example below). The supported field + // names are: + // - "cluster" + // - "id" + // - "locality.region" + // - "locality.sub_zone" + // - "locality.zone" + // - "metadata" + // - "user_agent_build_version.metadata" + // - "user_agent_build_version.version" + // - "user_agent_name" + // - "user_agent_version" + // + // The node context parameters act as a base layer dictionary for the context + // parameters (i.e. more specific resource specific context parameters will + // override). Field names will be prefixed with “udpa.node.” when included in + // context parameters. + // + // For example, if node_context_params is “["user_agent_name", "metadata"]“, + // the implied context parameters might be:: + // + // node.user_agent_name: "envoy" + // node.metadata.foo: "{\"bar\": \"baz\"}" + // node.metadata.some: "42" + // node.metadata.thing: "\"thing\"" + // + // [#not-implemented-hide:] + NodeContextParams []string `protobuf:"bytes,26,rep,name=node_context_params,json=nodeContextParams,proto3" json:"node_context_params,omitempty"` + // Statically specified resources. + StaticResources *Bootstrap_StaticResources `protobuf:"bytes,2,opt,name=static_resources,json=staticResources,proto3" json:"static_resources,omitempty"` + // xDS configuration sources. + DynamicResources *Bootstrap_DynamicResources `protobuf:"bytes,3,opt,name=dynamic_resources,json=dynamicResources,proto3" json:"dynamic_resources,omitempty"` + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager *ClusterManager `protobuf:"bytes,4,opt,name=cluster_manager,json=clusterManager,proto3" json:"cluster_manager,omitempty"` + // Health discovery service config option. + // (:ref:`core.ApiConfigSource `) + HdsConfig *v3.ApiConfigSource `protobuf:"bytes,14,opt,name=hds_config,json=hdsConfig,proto3" json:"hds_config,omitempty"` + // Optional file system path to search for startup flag files. + FlagsPath string `protobuf:"bytes,5,opt,name=flags_path,json=flagsPath,proto3" json:"flags_path,omitempty"` + // Optional set of stats sinks. + StatsSinks []*v31.StatsSink `protobuf:"bytes,6,rep,name=stats_sinks,json=statsSinks,proto3" json:"stats_sinks,omitempty"` + // Options to control behaviors of deferred creation compatible stats. + DeferredStatOptions *Bootstrap_DeferredStatOptions `protobuf:"bytes,39,opt,name=deferred_stat_options,json=deferredStatOptions,proto3" json:"deferred_stat_options,omitempty"` + // Configuration for internal processing of stats. + StatsConfig *v31.StatsConfig `protobuf:"bytes,13,opt,name=stats_config,json=statsConfig,proto3" json:"stats_config,omitempty"` + // Optional duration between flushes to configured stats sinks. For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). Only one of “stats_flush_interval“ or “stats_flush_on_admin“ + // can be set. + // Duration must be at least 1ms and at most 5 min. 
+ StatsFlushInterval *durationpb.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"` + // Types that are assignable to StatsFlush: + // + // *Bootstrap_StatsFlushOnAdmin + StatsFlush isBootstrap_StatsFlush `protobuf_oneof:"stats_flush"` + // Optional watchdog configuration. + // This is for a single watchdog configuration for the entire system. + // Deprecated in favor of “watchdogs“ which has finer granularity. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + Watchdog *Watchdog `protobuf:"bytes,8,opt,name=watchdog,proto3" json:"watchdog,omitempty"` + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + // [#extension-category: envoy.guarddog_actions] + Watchdogs *Watchdogs `protobuf:"bytes,27,opt,name=watchdogs,proto3" json:"watchdogs,omitempty"` + // Configuration for an external tracing provider. + // + // .. attention:: + // + // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider + // `. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + Tracing *v32.Tracing `protobuf:"bytes,9,opt,name=tracing,proto3" json:"tracing,omitempty"` + // Configuration for the runtime configuration provider. If not + // specified, a “null” provider will be used which will result in all defaults + // being used. + LayeredRuntime *LayeredRuntime `protobuf:"bytes,17,opt,name=layered_runtime,json=layeredRuntime,proto3" json:"layered_runtime,omitempty"` + // Configuration for the local administration HTTP server. + Admin *Admin `protobuf:"bytes,12,opt,name=admin,proto3" json:"admin,omitempty"` + // Optional overload manager configuration. + OverloadManager *v33.OverloadManager `protobuf:"bytes,15,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed value + // over the wire individually because the statsd protocol doesn't have any way to represent a + // histogram summary. Be aware that this can be a very large volume of data. + EnableDispatcherStats bool `protobuf:"varint,16,opt,name=enable_dispatcher_stats,json=enableDispatcherStats,proto3" json:"enable_dispatcher_stats,omitempty"` + // Optional string which will be used in lieu of x-envoy in prefixing headers. + // + // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be + // transformed into x-foo-retry-on etc. + // + // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the + // headers Envoy will trust for core code and core extensions only. Be VERY careful making + // changes to this string, especially in multi-layer Envoy deployments or deployments using + // extensions which are not upstream. + HeaderPrefix string `protobuf:"bytes,18,opt,name=header_prefix,json=headerPrefix,proto3" json:"header_prefix,omitempty"` + // Optional proxy version which will be used to set the value of :ref:`server.version statistic + // ` if specified. Envoy will not process this value, it will be sent as is to + // :ref:`stats sinks `. 
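// Illustrative use of the header prefix and server-version override documented
// above. Both values are placeholders chosen for the sketch, not defaults.
func exampleHeaderAndVersionOverride() *Bootstrap {
	return &Bootstrap{
		// Headers such as x-envoy-retry-on would become x-acme-retry-on.
		HeaderPrefix: "x-acme",
		// Reported verbatim as the server.version statistic.
		StatsServerVersionOverride: &wrapperspb.UInt64Value{Value: 20240101},
	}
}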
+ StatsServerVersionOverride *wrapperspb.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"` + // Always use TCP queries instead of UDP queries for DNS lookups. + // This may be overridden on a per-cluster basis in cds_config, + // when :ref:`dns_resolvers ` and + // :ref:`use_tcp_for_dns_lookups ` are + // specified. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + UseTcpForDnsLookups bool `protobuf:"varint,20,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // DNS resolution configuration which includes the underlying dns resolver addresses and options. + // This may be overridden on a per-cluster basis in cds_config, when + // :ref:`dns_resolution_config ` + // is specified. + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + DnsResolutionConfig *v3.DnsResolutionConfig `protobuf:"bytes,30,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"` + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this “typed_dns_resolver_config“. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. + // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists, + // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“. + // When “typed_dns_resolver_config“ is missing, the default behavior is in place. + // [#extension-category: envoy.network.dns_resolver] + TypedDnsResolverConfig *v3.TypedExtensionConfig `protobuf:"bytes,31,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + // [#extension-category: envoy.bootstrap] + BootstrapExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,21,rep,name=bootstrap_extensions,json=bootstrapExtensions,proto3" json:"bootstrap_extensions,omitempty"` + // Specifies optional extensions instantiated at startup time and + // invoked during crash time on the request that caused the crash. + FatalActions []*FatalAction `protobuf:"bytes,28,rep,name=fatal_actions,json=fatalActions,proto3" json:"fatal_actions,omitempty"` + // Configuration sources that will participate in + // xdstp:// URL authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the xdstp:// URL, call + // this “resource_authority“. + // 2. “resource_authority“ is compared against the authorities in any peer + // “ConfigSource“. The peer “ConfigSource“ is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer “ConfigSource“ message is used. + // 3. 
“resource_authority“ is compared sequentially with the authorities in + // each configuration source in “config_sources“. The first “ConfigSource“ + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // “default_config_source“ is used. + // 5. If “default_config_source“ is not specified, resolution fails. + // + // [#not-implemented-hide:] + ConfigSources []*v3.ConfigSource `protobuf:"bytes,22,rep,name=config_sources,json=configSources,proto3" json:"config_sources,omitempty"` + // Default configuration source for xdstp:// URLs if all + // other resolution fails. + // [#not-implemented-hide:] + DefaultConfigSource *v3.ConfigSource `protobuf:"bytes,23,opt,name=default_config_source,json=defaultConfigSource,proto3" json:"default_config_source,omitempty"` + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + DefaultSocketInterface string `protobuf:"bytes,24,opt,name=default_socket_interface,json=defaultSocketInterface,proto3" json:"default_socket_interface,omitempty"` + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + CertificateProviderInstances map[string]*v3.TypedExtensionConfig `protobuf:"bytes,25,rep,name=certificate_provider_instances,json=certificateProviderInstances,proto3" json:"certificate_provider_instances,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. + InlineHeaders []*CustomInlineHeader `protobuf:"bytes,32,rep,name=inline_headers,json=inlineHeaders,proto3" json:"inline_headers,omitempty"` + // Optional path to a file with performance tracing data created by "Perfetto" SDK in binary + // ProtoBuf format. The default value is "envoy.pftrace". + PerfTracingFilePath string `protobuf:"bytes,33,opt,name=perf_tracing_file_path,json=perfTracingFilePath,proto3" json:"perf_tracing_file_path,omitempty"` + // Optional overriding of default regex engine. + // If the value is not specified, Google RE2 will be used by default. + // [#extension-category: envoy.regex_engines] + DefaultRegexEngine *v3.TypedExtensionConfig `protobuf:"bytes,34,opt,name=default_regex_engine,json=defaultRegexEngine,proto3" json:"default_regex_engine,omitempty"` + // Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both + // fetch and load events during xDS processing. + // If a value is not specified, no XdsResourcesDelegate will be used. + // TODO(abeyad): Add public-facing documentation. + // [#not-implemented-hide:] + XdsDelegateExtension *v3.TypedExtensionConfig `protobuf:"bytes,35,opt,name=xds_delegate_extension,json=xdsDelegateExtension,proto3" json:"xds_delegate_extension,omitempty"` + // Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components, + // e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to + // process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used. + // + // .. 
note:: + // + // There are no in-repo extensions currently, and the :repo:`XdsConfigTracker ` + // interface should be implemented before using. + // See :repo:`xds_config_tracker_integration_test ` + // for an example usage of the interface. + XdsConfigTrackerExtension *v3.TypedExtensionConfig `protobuf:"bytes,36,opt,name=xds_config_tracker_extension,json=xdsConfigTrackerExtension,proto3" json:"xds_config_tracker_extension,omitempty"` + // [#not-implemented-hide:] + // This controls the type of listener manager configured for Envoy. Currently + // Envoy only supports ListenerManager for this field and Envoy Mobile + // supports ApiListenerManager. + ListenerManager *v3.TypedExtensionConfig `protobuf:"bytes,37,opt,name=listener_manager,json=listenerManager,proto3" json:"listener_manager,omitempty"` + // Optional application log configuration. + ApplicationLogConfig *Bootstrap_ApplicationLogConfig `protobuf:"bytes,38,opt,name=application_log_config,json=applicationLogConfig,proto3" json:"application_log_config,omitempty"` + // Optional gRPC async manager config. + GrpcAsyncClientManagerConfig *Bootstrap_GrpcAsyncClientManagerConfig `protobuf:"bytes,40,opt,name=grpc_async_client_manager_config,json=grpcAsyncClientManagerConfig,proto3" json:"grpc_async_client_manager_config,omitempty"` + // Optional configuration for memory allocation manager. + // Memory releasing is only supported for `tcmalloc allocator `_. + MemoryAllocatorManager *MemoryAllocatorManager `protobuf:"bytes,41,opt,name=memory_allocator_manager,json=memoryAllocatorManager,proto3" json:"memory_allocator_manager,omitempty"` +} + +func (x *Bootstrap) Reset() { + *x = Bootstrap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap) ProtoMessage() {} + +func (x *Bootstrap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap.ProtoReflect.Descriptor instead. 
+func (*Bootstrap) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0} +} + +func (x *Bootstrap) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *Bootstrap) GetNodeContextParams() []string { + if x != nil { + return x.NodeContextParams + } + return nil +} + +func (x *Bootstrap) GetStaticResources() *Bootstrap_StaticResources { + if x != nil { + return x.StaticResources + } + return nil +} + +func (x *Bootstrap) GetDynamicResources() *Bootstrap_DynamicResources { + if x != nil { + return x.DynamicResources + } + return nil +} + +func (x *Bootstrap) GetClusterManager() *ClusterManager { + if x != nil { + return x.ClusterManager + } + return nil +} + +func (x *Bootstrap) GetHdsConfig() *v3.ApiConfigSource { + if x != nil { + return x.HdsConfig + } + return nil +} + +func (x *Bootstrap) GetFlagsPath() string { + if x != nil { + return x.FlagsPath + } + return "" +} + +func (x *Bootstrap) GetStatsSinks() []*v31.StatsSink { + if x != nil { + return x.StatsSinks + } + return nil +} + +func (x *Bootstrap) GetDeferredStatOptions() *Bootstrap_DeferredStatOptions { + if x != nil { + return x.DeferredStatOptions + } + return nil +} + +func (x *Bootstrap) GetStatsConfig() *v31.StatsConfig { + if x != nil { + return x.StatsConfig + } + return nil +} + +func (x *Bootstrap) GetStatsFlushInterval() *durationpb.Duration { + if x != nil { + return x.StatsFlushInterval + } + return nil +} + +func (m *Bootstrap) GetStatsFlush() isBootstrap_StatsFlush { + if m != nil { + return m.StatsFlush + } + return nil +} + +func (x *Bootstrap) GetStatsFlushOnAdmin() bool { + if x, ok := x.GetStatsFlush().(*Bootstrap_StatsFlushOnAdmin); ok { + return x.StatsFlushOnAdmin + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetWatchdog() *Watchdog { + if x != nil { + return x.Watchdog + } + return nil +} + +func (x *Bootstrap) GetWatchdogs() *Watchdogs { + if x != nil { + return x.Watchdogs + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetTracing() *v32.Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *Bootstrap) GetLayeredRuntime() *LayeredRuntime { + if x != nil { + return x.LayeredRuntime + } + return nil +} + +func (x *Bootstrap) GetAdmin() *Admin { + if x != nil { + return x.Admin + } + return nil +} + +func (x *Bootstrap) GetOverloadManager() *v33.OverloadManager { + if x != nil { + return x.OverloadManager + } + return nil +} + +func (x *Bootstrap) GetEnableDispatcherStats() bool { + if x != nil { + return x.EnableDispatcherStats + } + return false +} + +func (x *Bootstrap) GetHeaderPrefix() string { + if x != nil { + return x.HeaderPrefix + } + return "" +} + +func (x *Bootstrap) GetStatsServerVersionOverride() *wrapperspb.UInt64Value { + if x != nil { + return x.StatsServerVersionOverride + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. 
+func (x *Bootstrap) GetDnsResolutionConfig() *v3.DnsResolutionConfig { + if x != nil { + return x.DnsResolutionConfig + } + return nil +} + +func (x *Bootstrap) GetTypedDnsResolverConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.TypedDnsResolverConfig + } + return nil +} + +func (x *Bootstrap) GetBootstrapExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.BootstrapExtensions + } + return nil +} + +func (x *Bootstrap) GetFatalActions() []*FatalAction { + if x != nil { + return x.FatalActions + } + return nil +} + +func (x *Bootstrap) GetConfigSources() []*v3.ConfigSource { + if x != nil { + return x.ConfigSources + } + return nil +} + +func (x *Bootstrap) GetDefaultConfigSource() *v3.ConfigSource { + if x != nil { + return x.DefaultConfigSource + } + return nil +} + +func (x *Bootstrap) GetDefaultSocketInterface() string { + if x != nil { + return x.DefaultSocketInterface + } + return "" +} + +func (x *Bootstrap) GetCertificateProviderInstances() map[string]*v3.TypedExtensionConfig { + if x != nil { + return x.CertificateProviderInstances + } + return nil +} + +func (x *Bootstrap) GetInlineHeaders() []*CustomInlineHeader { + if x != nil { + return x.InlineHeaders + } + return nil +} + +func (x *Bootstrap) GetPerfTracingFilePath() string { + if x != nil { + return x.PerfTracingFilePath + } + return "" +} + +func (x *Bootstrap) GetDefaultRegexEngine() *v3.TypedExtensionConfig { + if x != nil { + return x.DefaultRegexEngine + } + return nil +} + +func (x *Bootstrap) GetXdsDelegateExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsDelegateExtension + } + return nil +} + +func (x *Bootstrap) GetXdsConfigTrackerExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsConfigTrackerExtension + } + return nil +} + +func (x *Bootstrap) GetListenerManager() *v3.TypedExtensionConfig { + if x != nil { + return x.ListenerManager + } + return nil +} + +func (x *Bootstrap) GetApplicationLogConfig() *Bootstrap_ApplicationLogConfig { + if x != nil { + return x.ApplicationLogConfig + } + return nil +} + +func (x *Bootstrap) GetGrpcAsyncClientManagerConfig() *Bootstrap_GrpcAsyncClientManagerConfig { + if x != nil { + return x.GrpcAsyncClientManagerConfig + } + return nil +} + +func (x *Bootstrap) GetMemoryAllocatorManager() *MemoryAllocatorManager { + if x != nil { + return x.MemoryAllocatorManager + } + return nil +} + +type isBootstrap_StatsFlush interface { + isBootstrap_StatsFlush() +} + +type Bootstrap_StatsFlushOnAdmin struct { + // Flush stats to sinks only when queried for on the admin interface. If set, + // a flush timer is not created. Only one of “stats_flush_on_admin“ or + // “stats_flush_interval“ can be set. + StatsFlushOnAdmin bool `protobuf:"varint,29,opt,name=stats_flush_on_admin,json=statsFlushOnAdmin,proto3,oneof"` +} + +func (*Bootstrap_StatsFlushOnAdmin) isBootstrap_StatsFlush() {} + +// Administration interface :ref:`operations documentation +// `. +// [#next-free-field: 7] +type Admin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for :ref:`access logs ` + // emitted by the administration server. + AccessLog []*v34.AccessLog `protobuf:"bytes,5,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. This is only required if + // :ref:`address ` is set. 
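// Sketch of an Admin block matching the field comments in this message: a
// localhost admin listener, with the deprecated access_log_path left unset in
// favor of the access_log list. The address and port are placeholders.
func exampleAdmin() *Admin {
	return &Admin{
		Address: &v3.Address{
			Address: &v3.Address_SocketAddress{
				SocketAddress: &v3.SocketAddress{
					Address:       "127.0.0.1",
					PortSpecifier: &v3.SocketAddress_PortValue{PortValue: 9901},
				},
			},
		},
		// AccessLog would normally carry a typed file/stdout sink config; it is
		// left empty here to keep the sketch self-contained.
	}
}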
+ // Deprecated in favor of “access_log“ which offers more options. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + ProfilePath string `protobuf:"bytes,2,opt,name=profile_path,json=profilePath,proto3" json:"profile_path,omitempty"` + // The TCP address that the administration server will listen on. + // If not specified, Envoy will not start an administration server. + Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + SocketOptions []*v3.SocketOption `protobuf:"bytes,4,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Indicates whether :ref:`global_downstream_max_connections ` + // should apply to the admin interface or not. + IgnoreGlobalConnLimit bool `protobuf:"varint,6,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"` +} + +func (x *Admin) Reset() { + *x = Admin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Admin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Admin) ProtoMessage() {} + +func (x *Admin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Admin.ProtoReflect.Descriptor instead. +func (*Admin) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{1} +} + +func (x *Admin) GetAccessLog() []*v34.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Admin) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +func (x *Admin) GetProfilePath() string { + if x != nil { + return x.ProfilePath + } + return "" +} + +func (x *Admin) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Admin) GetSocketOptions() []*v3.SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *Admin) GetIgnoreGlobalConnLimit() bool { + if x != nil { + return x.IgnoreGlobalConnLimit + } + return false +} + +// Cluster manager :ref:`architecture overview `. +// [#next-free-field: 6] +type ClusterManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. + // If “local_cluster_name“ is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. 
This is unrelated to + // the :option:`--service-cluster` option which does not `affect zone aware + // routing `_. + LocalClusterName string `protobuf:"bytes,1,opt,name=local_cluster_name,json=localClusterName,proto3" json:"local_cluster_name,omitempty"` + // Optional global configuration for outlier detection. + OutlierDetection *ClusterManager_OutlierDetection `protobuf:"bytes,2,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. + UpstreamBindConfig *v3.BindConfig `protobuf:"bytes,3,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"` + // A management server endpoint to stream load stats to via + // “StreamLoadStats“. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + LoadStatsConfig *v3.ApiConfigSource `protobuf:"bytes,4,opt,name=load_stats_config,json=loadStatsConfig,proto3" json:"load_stats_config,omitempty"` + // Whether the ClusterManager will create clusters on the worker threads + // inline during requests. This will save memory and CPU cycles in cases where + // there are lots of inactive clusters and > 1 worker thread. + EnableDeferredClusterCreation bool `protobuf:"varint,5,opt,name=enable_deferred_cluster_creation,json=enableDeferredClusterCreation,proto3" json:"enable_deferred_cluster_creation,omitempty"` +} + +func (x *ClusterManager) Reset() { + *x = ClusterManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterManager) ProtoMessage() {} + +func (x *ClusterManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterManager.ProtoReflect.Descriptor instead. +func (*ClusterManager) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2} +} + +func (x *ClusterManager) GetLocalClusterName() string { + if x != nil { + return x.LocalClusterName + } + return "" +} + +func (x *ClusterManager) GetOutlierDetection() *ClusterManager_OutlierDetection { + if x != nil { + return x.OutlierDetection + } + return nil +} + +func (x *ClusterManager) GetUpstreamBindConfig() *v3.BindConfig { + if x != nil { + return x.UpstreamBindConfig + } + return nil +} + +func (x *ClusterManager) GetLoadStatsConfig() *v3.ApiConfigSource { + if x != nil { + return x.LoadStatsConfig + } + return nil +} + +func (x *ClusterManager) GetEnableDeferredClusterCreation() bool { + if x != nil { + return x.EnableDeferredClusterCreation + } + return false +} + +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +type Watchdogs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Watchdog for the main thread. 
+ MainThreadWatchdog *Watchdog `protobuf:"bytes,1,opt,name=main_thread_watchdog,json=mainThreadWatchdog,proto3" json:"main_thread_watchdog,omitempty"` + // Watchdog for the worker threads. + WorkerWatchdog *Watchdog `protobuf:"bytes,2,opt,name=worker_watchdog,json=workerWatchdog,proto3" json:"worker_watchdog,omitempty"` +} + +func (x *Watchdogs) Reset() { + *x = Watchdogs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdogs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdogs) ProtoMessage() {} + +func (x *Watchdogs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdogs.ProtoReflect.Descriptor instead. +func (*Watchdogs) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{3} +} + +func (x *Watchdogs) GetMainThreadWatchdog() *Watchdog { + if x != nil { + return x.MainThreadWatchdog + } + return nil +} + +func (x *Watchdogs) GetWorkerWatchdog() *Watchdog { + if x != nil { + return x.WorkerWatchdog + } + return nil +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 8] +type Watchdog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Register actions that will fire on given WatchDog events. + // See “WatchDogAction“ for priority of events. + Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"` + // The duration after which Envoy counts a nonresponsive thread in the + // “watchdog_miss“ statistic. If not specified the default is 200ms. + MissTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"` + // The duration after which Envoy counts a nonresponsive thread in the + // “watchdog_mega_miss“ statistic. If not specified the default is + // 1000ms. + MegamissTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"` + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + KillTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"` + // Defines the maximum jitter used to adjust the “kill_timeout“ if “kill_timeout“ is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). 
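// Sketch of per-subsystem watchdogs using the thresholds documented in this
// message: the miss/megamiss durations mirror the stated defaults (200ms and
// 1s), while the kill/multikill values and the 50% threshold are placeholders.
func exampleWatchdogs() *Watchdogs {
	return &Watchdogs{
		MainThreadWatchdog: &Watchdog{
			MissTimeout:     &durationpb.Duration{Nanos: 200000000}, // 200ms
			MegamissTimeout: &durationpb.Duration{Seconds: 1},
			KillTimeout:     &durationpb.Duration{Seconds: 10}, // 0 disables kills
		},
		WorkerWatchdog: &Watchdog{
			MissTimeout:        &durationpb.Duration{Nanos: 200000000},
			MultikillTimeout:   &durationpb.Duration{Seconds: 10},
			MultikillThreshold: &v35.Percent{Value: 50}, // 50% of worker threads
		},
	}
}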
+ MaxKillTimeoutJitter *durationpb.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"` + // If “max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))“ + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). + MultikillTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"` + // Sets the threshold for “multikill_timeout“ in terms of the percentage of + // nonresponsive threads required for the “multikill_timeout“. + // If not specified the default is 0. + MultikillThreshold *v35.Percent `protobuf:"bytes,5,opt,name=multikill_threshold,json=multikillThreshold,proto3" json:"multikill_threshold,omitempty"` +} + +func (x *Watchdog) Reset() { + *x = Watchdog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdog) ProtoMessage() {} + +func (x *Watchdog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdog.ProtoReflect.Descriptor instead. +func (*Watchdog) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4} +} + +func (x *Watchdog) GetActions() []*Watchdog_WatchdogAction { + if x != nil { + return x.Actions + } + return nil +} + +func (x *Watchdog) GetMissTimeout() *durationpb.Duration { + if x != nil { + return x.MissTimeout + } + return nil +} + +func (x *Watchdog) GetMegamissTimeout() *durationpb.Duration { + if x != nil { + return x.MegamissTimeout + } + return nil +} + +func (x *Watchdog) GetKillTimeout() *durationpb.Duration { + if x != nil { + return x.KillTimeout + } + return nil +} + +func (x *Watchdog) GetMaxKillTimeoutJitter() *durationpb.Duration { + if x != nil { + return x.MaxKillTimeoutJitter + } + return nil +} + +func (x *Watchdog) GetMultikillTimeout() *durationpb.Duration { + if x != nil { + return x.MultikillTimeout + } + return nil +} + +func (x *Watchdog) GetMultikillThreshold() *v35.Percent { + if x != nil { + return x.MultikillThreshold + } + return nil +} + +// Fatal actions to run while crashing. Actions can be safe (meaning they are +// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. +// If using an unsafe action that could get stuck or deadlock, it important to +// have an out of band system to terminate the process. +// +// The interface for the extension is “Envoy::Server::Configuration::FatalAction“. +// “FatalAction“ extensions live in the “envoy.extensions.fatal_actions“ API +// namespace. +type FatalAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extension specific configuration for the action. It's expected to conform + // to the “Envoy::Server::Configuration::FatalAction“ interface. 
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *FatalAction) Reset() { + *x = FatalAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FatalAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FatalAction) ProtoMessage() {} + +func (x *FatalAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FatalAction.ProtoReflect.Descriptor instead. +func (*FatalAction) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{5} +} + +func (x *FatalAction) GetConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.Config + } + return nil +} + +// Runtime :ref:`configuration overview ` (deprecated). +type Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. If this parameter is not set, there will be no disk based + // runtime. + SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"` + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + Subdirectory string `protobuf:"bytes,2,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"` + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + OverrideSubdirectory string `protobuf:"bytes,3,opt,name=override_subdirectory,json=overrideSubdirectory,proto3" json:"override_subdirectory,omitempty"` + // Static base runtime. This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. 
+ Base *structpb.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"` +} + +func (x *Runtime) Reset() { + *x = Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Runtime) ProtoMessage() {} + +func (x *Runtime) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Runtime.ProtoReflect.Descriptor instead. +func (*Runtime) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{6} +} + +func (x *Runtime) GetSymlinkRoot() string { + if x != nil { + return x.SymlinkRoot + } + return "" +} + +func (x *Runtime) GetSubdirectory() string { + if x != nil { + return x.Subdirectory + } + return "" +} + +func (x *Runtime) GetOverrideSubdirectory() string { + if x != nil { + return x.OverrideSubdirectory + } + return "" +} + +func (x *Runtime) GetBase() *structpb.Struct { + if x != nil { + return x.Base + } + return nil +} + +// [#next-free-field: 6] +type RuntimeLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Descriptive name for the runtime layer. This is only used for the runtime + // :http:get:`/runtime` output. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to LayerSpecifier: + // + // *RuntimeLayer_StaticLayer + // *RuntimeLayer_DiskLayer_ + // *RuntimeLayer_AdminLayer_ + // *RuntimeLayer_RtdsLayer_ + LayerSpecifier isRuntimeLayer_LayerSpecifier `protobuf_oneof:"layer_specifier"` +} + +func (x *RuntimeLayer) Reset() { + *x = RuntimeLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer) ProtoMessage() {} + +func (x *RuntimeLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer.ProtoReflect.Descriptor instead. 
+func (*RuntimeLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeLayer) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RuntimeLayer) GetLayerSpecifier() isRuntimeLayer_LayerSpecifier { + if m != nil { + return m.LayerSpecifier + } + return nil +} + +func (x *RuntimeLayer) GetStaticLayer() *structpb.Struct { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_StaticLayer); ok { + return x.StaticLayer + } + return nil +} + +func (x *RuntimeLayer) GetDiskLayer() *RuntimeLayer_DiskLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_DiskLayer_); ok { + return x.DiskLayer + } + return nil +} + +func (x *RuntimeLayer) GetAdminLayer() *RuntimeLayer_AdminLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_AdminLayer_); ok { + return x.AdminLayer + } + return nil +} + +func (x *RuntimeLayer) GetRtdsLayer() *RuntimeLayer_RtdsLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_RtdsLayer_); ok { + return x.RtdsLayer + } + return nil +} + +type isRuntimeLayer_LayerSpecifier interface { + isRuntimeLayer_LayerSpecifier() +} + +type RuntimeLayer_StaticLayer struct { + // :ref:`Static runtime ` layer. + // This follows the :ref:`runtime protobuf JSON representation encoding + // `. Unlike static xDS resources, this static + // layer is overridable by later layers in the runtime virtual filesystem. + StaticLayer *structpb.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"` +} + +type RuntimeLayer_DiskLayer_ struct { + DiskLayer *RuntimeLayer_DiskLayer `protobuf:"bytes,3,opt,name=disk_layer,json=diskLayer,proto3,oneof"` +} + +type RuntimeLayer_AdminLayer_ struct { + AdminLayer *RuntimeLayer_AdminLayer `protobuf:"bytes,4,opt,name=admin_layer,json=adminLayer,proto3,oneof"` +} + +type RuntimeLayer_RtdsLayer_ struct { + RtdsLayer *RuntimeLayer_RtdsLayer `protobuf:"bytes,5,opt,name=rtds_layer,json=rtdsLayer,proto3,oneof"` +} + +func (*RuntimeLayer_StaticLayer) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_DiskLayer_) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_AdminLayer_) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_RtdsLayer_) isRuntimeLayer_LayerSpecifier() {} + +// Runtime :ref:`configuration overview `. +type LayeredRuntime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The :ref:`layers ` of the runtime. This is ordered + // such that later layers in the list overlay earlier entries. + Layers []*RuntimeLayer `protobuf:"bytes,1,rep,name=layers,proto3" json:"layers,omitempty"` +} + +func (x *LayeredRuntime) Reset() { + *x = LayeredRuntime{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LayeredRuntime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LayeredRuntime) ProtoMessage() {} + +func (x *LayeredRuntime) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LayeredRuntime.ProtoReflect.Descriptor instead. 
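// Sketch of a layered runtime following the ordering rule above (later layers
// overlay earlier ones): a static base layer built with structpb, overlaid by
// an admin layer. The runtime key is a placeholder and the structpb.NewStruct
// error is ignored for brevity.
func exampleLayeredRuntime() *LayeredRuntime {
	base, _ := structpb.NewStruct(map[string]interface{}{
		"example.placeholder_flag": true, // placeholder runtime key
	})
	return &LayeredRuntime{
		Layers: []*RuntimeLayer{
			{
				Name:           "static",
				LayerSpecifier: &RuntimeLayer_StaticLayer{StaticLayer: base},
			},
			{
				// Values set via the admin endpoint win over the static layer.
				Name:           "admin",
				LayerSpecifier: &RuntimeLayer_AdminLayer_{AdminLayer: &RuntimeLayer_AdminLayer{}},
			},
		},
	}
}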
+func (*LayeredRuntime) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{8} +} + +func (x *LayeredRuntime) GetLayers() []*RuntimeLayer { + if x != nil { + return x.Layers + } + return nil +} + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. +// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +type CustomInlineHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the header that is expected to be set as the inline header. + InlineHeaderName string `protobuf:"bytes,1,opt,name=inline_header_name,json=inlineHeaderName,proto3" json:"inline_header_name,omitempty"` + // The type of the header that is expected to be set as the inline header. + InlineHeaderType CustomInlineHeader_InlineHeaderType `protobuf:"varint,2,opt,name=inline_header_type,json=inlineHeaderType,proto3,enum=envoy.config.bootstrap.v3.CustomInlineHeader_InlineHeaderType" json:"inline_header_type,omitempty"` +} + +func (x *CustomInlineHeader) Reset() { + *x = CustomInlineHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomInlineHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomInlineHeader) ProtoMessage() {} + +func (x *CustomInlineHeader) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomInlineHeader.ProtoReflect.Descriptor instead. +func (*CustomInlineHeader) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9} +} + +func (x *CustomInlineHeader) GetInlineHeaderName() string { + if x != nil { + return x.InlineHeaderName + } + return "" +} + +func (x *CustomInlineHeader) GetInlineHeaderType() CustomInlineHeader_InlineHeaderType { + if x != nil { + return x.InlineHeaderType + } + return CustomInlineHeader_REQUEST_HEADER +} + +type MemoryAllocatorManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures tcmalloc to perform background release of free memory in amount of bytes per “memory_release_interval“ interval. + // If equals to “0“, no memory release will occur. Defaults to “0“. 
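// Sketch matching the two fields of this message: release up to ~10MiB of free
// heap back to the OS every second (the stated default interval). The byte
// count is a placeholder, and the setting only has effect with tcmalloc.
func exampleMemoryAllocatorManager() *MemoryAllocatorManager {
	return &MemoryAllocatorManager{
		BytesToRelease:        10 * 1024 * 1024, // 0 disables background release
		MemoryReleaseInterval: &durationpb.Duration{Seconds: 1},
	}
}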
+ BytesToRelease uint64 `protobuf:"varint,1,opt,name=bytes_to_release,json=bytesToRelease,proto3" json:"bytes_to_release,omitempty"` + // Interval in milliseconds for memory releasing. If specified, during every + // interval Envoy will try to release “bytes_to_release“ of free memory back to operating system for reuse. + // Defaults to 1000 milliseconds. + MemoryReleaseInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=memory_release_interval,json=memoryReleaseInterval,proto3" json:"memory_release_interval,omitempty"` +} + +func (x *MemoryAllocatorManager) Reset() { + *x = MemoryAllocatorManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryAllocatorManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryAllocatorManager) ProtoMessage() {} + +func (x *MemoryAllocatorManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryAllocatorManager.ProtoReflect.Descriptor instead. +func (*MemoryAllocatorManager) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{10} +} + +func (x *MemoryAllocatorManager) GetBytesToRelease() uint64 { + if x != nil { + return x.BytesToRelease + } + return 0 +} + +func (x *MemoryAllocatorManager) GetMemoryReleaseInterval() *durationpb.Duration { + if x != nil { + return x.MemoryReleaseInterval + } + return nil +} + +type Bootstrap_StaticResources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + Listeners []*v36.Listener `protobuf:"bytes,1,rep,name=listeners,proto3" json:"listeners,omitempty"` + // If a network based configuration source is specified for :ref:`cds_config + // `, it's necessary + // to have some initial cluster definitions available to allow Envoy to know + // how to speak to the management server. These cluster definitions may not + // use :ref:`EDS ` (i.e. they should be static + // IP or DNS-based). 
+ Clusters []*v37.Cluster `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"` + // These static secrets can be used by :ref:`SdsSecretConfig + // ` + Secrets []*v38.Secret `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty"` +} + +func (x *Bootstrap_StaticResources) Reset() { + *x = Bootstrap_StaticResources{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_StaticResources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_StaticResources) ProtoMessage() {} + +func (x *Bootstrap_StaticResources) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_StaticResources.ProtoReflect.Descriptor instead. +func (*Bootstrap_StaticResources) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Bootstrap_StaticResources) GetListeners() []*v36.Listener { + if x != nil { + return x.Listeners + } + return nil +} + +func (x *Bootstrap_StaticResources) GetClusters() []*v37.Cluster { + if x != nil { + return x.Clusters + } + return nil +} + +func (x *Bootstrap_StaticResources) GetSecrets() []*v38.Secret { + if x != nil { + return x.Secrets + } + return nil +} + +// [#next-free-field: 7] +type Bootstrap_DynamicResources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + LdsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=lds_config,json=ldsConfig,proto3" json:"lds_config,omitempty"` + // xdstp:// resource locator for listener collection. + // [#not-implemented-hide:] + LdsResourcesLocator string `protobuf:"bytes,5,opt,name=lds_resources_locator,json=ldsResourcesLocator,proto3" json:"lds_resources_locator,omitempty"` + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + CdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=cds_config,json=cdsConfig,proto3" json:"cds_config,omitempty"` + // xdstp:// resource locator for cluster collection. + // [#not-implemented-hide:] + CdsResourcesLocator string `protobuf:"bytes,6,opt,name=cds_resources_locator,json=cdsResourcesLocator,proto3" json:"cds_resources_locator,omitempty"` + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. 
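// Sketch of a dynamic-resources block where LDS and CDS are both served over
// ADS, per the comments above: the ADS source uses the GRPC api_type and the
// LDS/CDS ConfigSources point at it via the ads specifier. "xds_cluster" is a
// placeholder that would have to exist in static_resources.
func exampleDynamicResources() *Bootstrap_DynamicResources {
	return &Bootstrap_DynamicResources{
		AdsConfig: &v3.ApiConfigSource{
			ApiType:             v3.ApiConfigSource_GRPC,
			TransportApiVersion: v3.ApiVersion_V3,
			GrpcServices: []*v3.GrpcService{{
				TargetSpecifier: &v3.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &v3.GrpcService_EnvoyGrpc{ClusterName: "xds_cluster"},
				},
			}},
		},
		LdsConfig: &v3.ConfigSource{
			ConfigSourceSpecifier: &v3.ConfigSource_Ads{Ads: &v3.AggregatedConfigSource{}},
		},
		CdsConfig: &v3.ConfigSource{
			ConfigSourceSpecifier: &v3.ConfigSource_Ads{Ads: &v3.AggregatedConfigSource{}},
		},
	}
}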
+ AdsConfig *v3.ApiConfigSource `protobuf:"bytes,3,opt,name=ads_config,json=adsConfig,proto3" json:"ads_config,omitempty"` +} + +func (x *Bootstrap_DynamicResources) Reset() { + *x = Bootstrap_DynamicResources{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_DynamicResources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_DynamicResources) ProtoMessage() {} + +func (x *Bootstrap_DynamicResources) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_DynamicResources.ProtoReflect.Descriptor instead. +func (*Bootstrap_DynamicResources) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Bootstrap_DynamicResources) GetLdsConfig() *v3.ConfigSource { + if x != nil { + return x.LdsConfig + } + return nil +} + +func (x *Bootstrap_DynamicResources) GetLdsResourcesLocator() string { + if x != nil { + return x.LdsResourcesLocator + } + return "" +} + +func (x *Bootstrap_DynamicResources) GetCdsConfig() *v3.ConfigSource { + if x != nil { + return x.CdsConfig + } + return nil +} + +func (x *Bootstrap_DynamicResources) GetCdsResourcesLocator() string { + if x != nil { + return x.CdsResourcesLocator + } + return "" +} + +func (x *Bootstrap_DynamicResources) GetAdsConfig() *v3.ApiConfigSource { + if x != nil { + return x.AdsConfig + } + return nil +} + +type Bootstrap_ApplicationLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional field to set the application logs format. If this field is set, it will override + // the default log format. Setting both this field and :option:`--log-format` command line + // option is not allowed, and will cause a bootstrap error. + LogFormat *Bootstrap_ApplicationLogConfig_LogFormat `protobuf:"bytes,1,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"` +} + +func (x *Bootstrap_ApplicationLogConfig) Reset() { + *x = Bootstrap_ApplicationLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_ApplicationLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_ApplicationLogConfig) ProtoMessage() {} + +func (x *Bootstrap_ApplicationLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_ApplicationLogConfig.ProtoReflect.Descriptor instead. 
+func (*Bootstrap_ApplicationLogConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Bootstrap_ApplicationLogConfig) GetLogFormat() *Bootstrap_ApplicationLogConfig_LogFormat { + if x != nil { + return x.LogFormat + } + return nil +} + +type Bootstrap_DeferredStatOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below). + // This will save memory and CPU cycles when creating the objects that own these stats, if those + // stats are never referenced throughout the lifetime of the process. However, it will incur additional + // memory overhead for these objects, and a small increase of CPU usage when a at least one of the stats + // is updated for the first time. + // Groups of stats that will be lazily initialized: + // - Cluster traffic stats: a subgroup of the :ref:`cluster statistics ` + // that are used when requests are routed to the cluster. + EnableDeferredCreationStats bool `protobuf:"varint,1,opt,name=enable_deferred_creation_stats,json=enableDeferredCreationStats,proto3" json:"enable_deferred_creation_stats,omitempty"` +} + +func (x *Bootstrap_DeferredStatOptions) Reset() { + *x = Bootstrap_DeferredStatOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_DeferredStatOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_DeferredStatOptions) ProtoMessage() {} + +func (x *Bootstrap_DeferredStatOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_DeferredStatOptions.ProtoReflect.Descriptor instead. +func (*Bootstrap_DeferredStatOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *Bootstrap_DeferredStatOptions) GetEnableDeferredCreationStats() bool { + if x != nil { + return x.EnableDeferredCreationStats + } + return false +} + +type Bootstrap_GrpcAsyncClientManagerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional field to set the expiration time for the cached gRPC client object. + // The minimal value is 5s and the default is 50s. 
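// Sketch combining the two option messages documented above: deferred creation
// of cluster traffic stats, and the cached gRPC client expiry. 50s mirrors the
// stated default; anything below the 5s minimum would be rejected.
func exampleStatsAndGrpcManagerOptions() *Bootstrap {
	return &Bootstrap{
		// Lazily create cluster traffic stats only when first used.
		DeferredStatOptions: &Bootstrap_DeferredStatOptions{EnableDeferredCreationStats: true},
		// Expire cached gRPC async clients after 50s of idleness.
		GrpcAsyncClientManagerConfig: &Bootstrap_GrpcAsyncClientManagerConfig{
			MaxCachedEntryIdleDuration: &durationpb.Duration{Seconds: 50},
		},
	}
}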
+ MaxCachedEntryIdleDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"` +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) Reset() { + *x = Bootstrap_GrpcAsyncClientManagerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_GrpcAsyncClientManagerConfig) ProtoMessage() {} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_GrpcAsyncClientManagerConfig.ProtoReflect.Descriptor instead. +func (*Bootstrap_GrpcAsyncClientManagerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *durationpb.Duration { + if x != nil { + return x.MaxCachedEntryIdleDuration + } + return nil +} + +type Bootstrap_ApplicationLogConfig_LogFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LogFormat: + // + // *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat + // *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat + LogFormat isBootstrap_ApplicationLogConfig_LogFormat_LogFormat `protobuf_oneof:"log_format"` +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) Reset() { + *x = Bootstrap_ApplicationLogConfig_LogFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat) ProtoMessage() {} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_ApplicationLogConfig_LogFormat.ProtoReflect.Descriptor instead. 
+func (*Bootstrap_ApplicationLogConfig_LogFormat) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) GetLogFormat() isBootstrap_ApplicationLogConfig_LogFormat_LogFormat { + if m != nil { + return m.LogFormat + } + return nil +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *structpb.Struct { + if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { + return x.JsonFormat + } + return nil +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetTextFormat() string { + if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok { + return x.TextFormat + } + return "" +} + +type isBootstrap_ApplicationLogConfig_LogFormat_LogFormat interface { + isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() +} + +type Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat struct { + // Flush application logs in JSON format. The configured JSON struct can + // support all the format flags specified in the :option:`--log-format` + // command line options section, except for the “%v“ and “%_“ flags. + JsonFormat *structpb.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"` +} + +type Bootstrap_ApplicationLogConfig_LogFormat_TextFormat struct { + // Flush application log in a format defined by a string. The text format + // can support all the format flags specified in the :option:`--log-format` + // command line option section. + TextFormat string `protobuf:"bytes,2,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() { +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() { +} + +type ClusterManager_OutlierDetection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the path to the outlier event log. + EventLogPath string `protobuf:"bytes,1,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + EventService *v3.EventServiceConfig `protobuf:"bytes,2,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"` +} + +func (x *ClusterManager_OutlierDetection) Reset() { + *x = ClusterManager_OutlierDetection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterManager_OutlierDetection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterManager_OutlierDetection) ProtoMessage() {} + +func (x *ClusterManager_OutlierDetection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterManager_OutlierDetection.ProtoReflect.Descriptor instead. 
+func (*ClusterManager_OutlierDetection) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClusterManager_OutlierDetection) GetEventLogPath() string { + if x != nil { + return x.EventLogPath + } + return "" +} + +func (x *ClusterManager_OutlierDetection) GetEventService() *v3.EventServiceConfig { + if x != nil { + return x.EventService + } + return nil +} + +type Watchdog_WatchdogAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extension specific configuration for the action. + Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Event Watchdog_WatchdogAction_WatchdogEvent `protobuf:"varint,2,opt,name=event,proto3,enum=envoy.config.bootstrap.v3.Watchdog_WatchdogAction_WatchdogEvent" json:"event,omitempty"` +} + +func (x *Watchdog_WatchdogAction) Reset() { + *x = Watchdog_WatchdogAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdog_WatchdogAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdog_WatchdogAction) ProtoMessage() {} + +func (x *Watchdog_WatchdogAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdog_WatchdogAction.ProtoReflect.Descriptor instead. +func (*Watchdog_WatchdogAction) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *Watchdog_WatchdogAction) GetConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *Watchdog_WatchdogAction) GetEvent() Watchdog_WatchdogAction_WatchdogEvent { + if x != nil { + return x.Event + } + return Watchdog_WatchdogAction_UNKNOWN +} + +// :ref:`Disk runtime ` layer. +type RuntimeLayer_DiskLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. + // Envoy will watch the location for changes and reload the file system tree + // when they happen. See documentation on runtime :ref:`atomicity + // ` for further details on how reloads are + // treated. + SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"` + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + Subdirectory string `protobuf:"bytes,3,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"` + // :ref:`Append ` the + // service cluster to the path under symlink root. 
+ AppendServiceCluster bool `protobuf:"varint,2,opt,name=append_service_cluster,json=appendServiceCluster,proto3" json:"append_service_cluster,omitempty"` +} + +func (x *RuntimeLayer_DiskLayer) Reset() { + *x = RuntimeLayer_DiskLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_DiskLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_DiskLayer) ProtoMessage() {} + +func (x *RuntimeLayer_DiskLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_DiskLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_DiskLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *RuntimeLayer_DiskLayer) GetSymlinkRoot() string { + if x != nil { + return x.SymlinkRoot + } + return "" +} + +func (x *RuntimeLayer_DiskLayer) GetSubdirectory() string { + if x != nil { + return x.Subdirectory + } + return "" +} + +func (x *RuntimeLayer_DiskLayer) GetAppendServiceCluster() bool { + if x != nil { + return x.AppendServiceCluster + } + return false +} + +// :ref:`Admin console runtime ` layer. +type RuntimeLayer_AdminLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RuntimeLayer_AdminLayer) Reset() { + *x = RuntimeLayer_AdminLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_AdminLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_AdminLayer) ProtoMessage() {} + +func (x *RuntimeLayer_AdminLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_AdminLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_AdminLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 1} +} + +// :ref:`Runtime Discovery Service (RTDS) ` layer. +type RuntimeLayer_RtdsLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource to subscribe to at “rtds_config“ for the RTDS layer. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // RTDS configuration source. 
+ RtdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=rtds_config,json=rtdsConfig,proto3" json:"rtds_config,omitempty"` +} + +func (x *RuntimeLayer_RtdsLayer) Reset() { + *x = RuntimeLayer_RtdsLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_RtdsLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_RtdsLayer) ProtoMessage() {} + +func (x *RuntimeLayer_RtdsLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_RtdsLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_RtdsLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *RuntimeLayer_RtdsLayer) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RuntimeLayer_RtdsLayer) GetRtdsConfig() *v3.ConfigSource { + if x != nil { + return x.RtdsConfig + } + return nil +} + +var File_envoy_config_bootstrap_v3_bootstrap_proto protoreflect.FileDescriptor + +var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, + 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 
0x33, 0x2f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x99, 0x24, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, + 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, + 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6e, 0x6f, + 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x5f, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, + 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x62, 0x0a, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x68, 0x64, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x68, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, + 0x0a, 0x0a, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a, + 0x0b, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, + 0x6b, 0x73, 0x12, 0x6c, 0x0a, 0x15, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x27, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x65, 0x66, + 0x65, 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x24, 0xfa, 0x42, 0x0e, 0xaa, 0x01, 0x0b, 0x1a, 0x03, 0x08, 0xac, 0x02, 0x32, + 0x04, 0x10, 0xc0, 0x84, 0x3d, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x12, 0x0b, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, + 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x14, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, + 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73, + 0x68, 0x4f, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x08, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x64, 0x6f, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x08, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x52, + 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x07, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 
0x6e, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x12, 0x52, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x5f, 0x0a, + 0x10, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x42, 0x09, 0x8a, 0x93, 0xb7, 0x2a, 0x04, 0x08, 0x01, 0x10, 0x01, 0x52, 0x0f, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5f, 0x0a, 0x1d, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x41, 0x0a, 0x17, + 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54, + 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12, + 0x6a, 0x0a, 0x15, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 
0x6c, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x14, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, + 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x15, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x13, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x1e, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x19, + 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1c, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x69, 0x6e, + 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x20, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x72, 0x66, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x70, 0x65, 0x72, 0x66, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x22, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x78, 0x64, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x23, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x78, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6b, 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x16, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x20, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x67, 0x72, 0x70, 0x63, 0x41, 0x73, + 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x18, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x16, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a, + 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0xf9, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 
0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x09, + 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, + 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a, + 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 
0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, + 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03, + 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, + 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, 0x0e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x4f, 0x75, 0x74, 0x6c, 
0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x20, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a, + 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, + 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55, + 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10, + 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 
0x6b, 0x69, 0x6c, + 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02, + 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, + 0x75, 
0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62, + 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, + 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64, + 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01, + 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x61, 
0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, + 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, + 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74, + 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72, + 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, + 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12, + 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 
0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51, + 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13, + 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, + 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, + 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x17, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce sync.Once + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = 
file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc +) + +func file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP() []byte { + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce.Do(func() { + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData) + }) + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData +} + +var file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{ + (Watchdog_WatchdogAction_WatchdogEvent)(0), // 0: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + (CustomInlineHeader_InlineHeaderType)(0), // 1: envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + (*Bootstrap)(nil), // 2: envoy.config.bootstrap.v3.Bootstrap + (*Admin)(nil), // 3: envoy.config.bootstrap.v3.Admin + (*ClusterManager)(nil), // 4: envoy.config.bootstrap.v3.ClusterManager + (*Watchdogs)(nil), // 5: envoy.config.bootstrap.v3.Watchdogs + (*Watchdog)(nil), // 6: envoy.config.bootstrap.v3.Watchdog + (*FatalAction)(nil), // 7: envoy.config.bootstrap.v3.FatalAction + (*Runtime)(nil), // 8: envoy.config.bootstrap.v3.Runtime + (*RuntimeLayer)(nil), // 9: envoy.config.bootstrap.v3.RuntimeLayer + (*LayeredRuntime)(nil), // 10: envoy.config.bootstrap.v3.LayeredRuntime + (*CustomInlineHeader)(nil), // 11: envoy.config.bootstrap.v3.CustomInlineHeader + (*MemoryAllocatorManager)(nil), // 12: envoy.config.bootstrap.v3.MemoryAllocatorManager + (*Bootstrap_StaticResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.StaticResources + (*Bootstrap_DynamicResources)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.DynamicResources + (*Bootstrap_ApplicationLogConfig)(nil), // 15: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + (*Bootstrap_DeferredStatOptions)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 17: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + nil, // 18: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 19: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + (*ClusterManager_OutlierDetection)(nil), // 20: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + (*Watchdog_WatchdogAction)(nil), // 21: envoy.config.bootstrap.v3.Watchdog.WatchdogAction + (*RuntimeLayer_DiskLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + (*RuntimeLayer_AdminLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + (*RuntimeLayer_RtdsLayer)(nil), // 24: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + (*v3.Node)(nil), // 25: envoy.config.core.v3.Node + (*v3.ApiConfigSource)(nil), // 26: envoy.config.core.v3.ApiConfigSource + (*v31.StatsSink)(nil), // 27: envoy.config.metrics.v3.StatsSink + (*v31.StatsConfig)(nil), // 28: envoy.config.metrics.v3.StatsConfig + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (*v32.Tracing)(nil), // 30: envoy.config.trace.v3.Tracing + (*v33.OverloadManager)(nil), // 31: envoy.config.overload.v3.OverloadManager + (*wrapperspb.UInt64Value)(nil), // 32: google.protobuf.UInt64Value + (*v3.DnsResolutionConfig)(nil), // 33: envoy.config.core.v3.DnsResolutionConfig + (*v3.TypedExtensionConfig)(nil), // 34: 
envoy.config.core.v3.TypedExtensionConfig + (*v3.ConfigSource)(nil), // 35: envoy.config.core.v3.ConfigSource + (*v34.AccessLog)(nil), // 36: envoy.config.accesslog.v3.AccessLog + (*v3.Address)(nil), // 37: envoy.config.core.v3.Address + (*v3.SocketOption)(nil), // 38: envoy.config.core.v3.SocketOption + (*v3.BindConfig)(nil), // 39: envoy.config.core.v3.BindConfig + (*v35.Percent)(nil), // 40: envoy.type.v3.Percent + (*structpb.Struct)(nil), // 41: google.protobuf.Struct + (*v36.Listener)(nil), // 42: envoy.config.listener.v3.Listener + (*v37.Cluster)(nil), // 43: envoy.config.cluster.v3.Cluster + (*v38.Secret)(nil), // 44: envoy.extensions.transport_sockets.tls.v3.Secret + (*v3.EventServiceConfig)(nil), // 45: envoy.config.core.v3.EventServiceConfig +} +var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{ + 25, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node + 13, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources + 14, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources + 4, // 3: envoy.config.bootstrap.v3.Bootstrap.cluster_manager:type_name -> envoy.config.bootstrap.v3.ClusterManager + 26, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource + 27, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink + 16, // 6: envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + 28, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig + 29, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration + 6, // 9: envoy.config.bootstrap.v3.Bootstrap.watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 5, // 10: envoy.config.bootstrap.v3.Bootstrap.watchdogs:type_name -> envoy.config.bootstrap.v3.Watchdogs + 30, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing + 10, // 12: envoy.config.bootstrap.v3.Bootstrap.layered_runtime:type_name -> envoy.config.bootstrap.v3.LayeredRuntime + 3, // 13: envoy.config.bootstrap.v3.Bootstrap.admin:type_name -> envoy.config.bootstrap.v3.Admin + 31, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager + 32, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value + 33, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 34, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 7, // 19: envoy.config.bootstrap.v3.Bootstrap.fatal_actions:type_name -> envoy.config.bootstrap.v3.FatalAction + 35, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource + 35, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource + 18, // 22: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + 11, // 23: 
envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader + 34, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + 17, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + 12, // 30: envoy.config.bootstrap.v3.Bootstrap.memory_allocator_manager:type_name -> envoy.config.bootstrap.v3.MemoryAllocatorManager + 36, // 31: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 37, // 32: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address + 38, // 33: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption + 20, // 34: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + 39, // 35: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 26, // 36: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource + 6, // 37: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 6, // 38: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 21, // 39: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction + 29, // 40: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration + 29, // 41: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration + 29, // 42: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration + 29, // 43: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration + 29, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration + 40, // 45: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent + 34, // 46: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 47: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct + 41, // 48: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct + 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + 24, // 51: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + 9, // 52: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer + 1, // 53: 
envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + 29, // 54: envoy.config.bootstrap.v3.MemoryAllocatorManager.memory_release_interval:type_name -> google.protobuf.Duration + 42, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener + 43, // 56: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster + 44, // 57: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret + 35, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource + 35, // 59: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource + 26, // 60: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource + 19, // 61: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + 29, // 62: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration + 34, // 63: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 64: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct + 45, // 65: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 34, // 66: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 67: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + 35, // 68: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource + 69, // [69:69] is the sub-list for method output_type + 69, // [69:69] is the sub-list for method input_type + 69, // [69:69] is the sub-list for extension type_name + 69, // [69:69] is the sub-list for extension extendee + 0, // [0:69] is the sub-list for field type_name +} + +func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() } +func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { + if File_envoy_config_bootstrap_v3_bootstrap_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Admin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Watchdogs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Watchdog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FatalAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LayeredRuntime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomInlineHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryAllocatorManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_StaticResources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_DynamicResources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_ApplicationLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_DeferredStatOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_GrpcAsyncClientManagerConfig); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_ApplicationLogConfig_LogFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterManager_OutlierDetection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Watchdog_WatchdogAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_DiskLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_AdminLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_RtdsLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Bootstrap_StatsFlushOnAdmin)(nil), + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*RuntimeLayer_StaticLayer)(nil), + (*RuntimeLayer_DiskLayer_)(nil), + (*RuntimeLayer_AdminLayer_)(nil), + (*RuntimeLayer_RtdsLayer_)(nil), + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].OneofWrappers = []interface{}{ + (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat)(nil), + (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc, + NumEnums: 2, + NumMessages: 23, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes, + DependencyIndexes: file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs, + EnumInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes, + MessageInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes, + }.Build() + File_envoy_config_bootstrap_v3_bootstrap_proto = out.File + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = nil + file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = nil + file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go new file mode 100644 index 000000000..55724c095 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go @@ -0,0 +1,4501 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Bootstrap with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Bootstrap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BootstrapMultiError, or nil +// if none found. +func (m *Bootstrap) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStaticResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStaticResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDynamicResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetClusterManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FlagsPath + + for idx, item := range m.GetStatsSinks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDeferredStatOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDeferredStatOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStatsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetStatsFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BootstrapValidationError{ + field: "StatsFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(300*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte || dur >= lt { + err := BootstrapValidationError{ + field: "StatsFlushInterval", + reason: "value must be inside range [1ms, 5m0s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchdogs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchdogs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLayeredRuntime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLayeredRuntime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverloadManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverloadManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableDispatcherStats + + // no validation rules for HeaderPrefix + + if all { + switch v := interface{}(m.GetStatsServerVersionOverride()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsServerVersionOverride()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != 
nil { + return BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UseTcpForDnsLookups + + if all { + switch v := interface{}(m.GetDnsResolutionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetBootstrapExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetFatalActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetConfigSources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDefaultConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultSocketInterface + + { + sorted_keys := make([]string, len(m.GetCertificateProviderInstances())) + i := 0 + for key := range m.GetCertificateProviderInstances() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetCertificateProviderInstances()[key] + _ = val + + // no validation rules for CertificateProviderInstances[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + for idx, item := range m.GetInlineHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: 
fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for PerfTracingFilePath + + if all { + switch v := interface{}(m.GetDefaultRegexEngine()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultRegexEngine()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetXdsDelegateExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsDelegateExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetXdsConfigTrackerExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsConfigTrackerExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetListenerManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + 
if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApplicationLogConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApplicationLogConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcAsyncClientManagerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcAsyncClientManagerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMemoryAllocatorManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryAllocatorManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.StatsFlush.(type) { + case *Bootstrap_StatsFlushOnAdmin: + if v == nil { + err := BootstrapValidationError{ + field: "StatsFlush", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetStatsFlushOnAdmin() != true { + err := BootstrapValidationError{ + field: "StatsFlushOnAdmin", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) 
> 0 { + return BootstrapMultiError(errors) + } + + return nil +} + +// BootstrapMultiError is an error wrapping multiple validation errors returned +// by Bootstrap.ValidateAll() if the designated constraints aren't met. +type BootstrapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BootstrapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BootstrapMultiError) AllErrors() []error { return m } + +// BootstrapValidationError is the validation error returned by +// Bootstrap.Validate if the designated constraints aren't met. +type BootstrapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BootstrapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BootstrapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BootstrapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BootstrapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BootstrapValidationError) ErrorName() string { return "BootstrapValidationError" } + +// Error satisfies the builtin error interface +func (e BootstrapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BootstrapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BootstrapValidationError{} + +// Validate checks the field values on Admin with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Admin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Admin with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in AdminMultiError, or nil if none found. 
+func (m *Admin) ValidateAll() error { + return m.validate(true) +} + +func (m *Admin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AccessLogPath + + // no validation rules for ProfilePath + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for IgnoreGlobalConnLimit + + if len(errors) > 0 { + return AdminMultiError(errors) + } + + return nil +} + +// AdminMultiError is an error wrapping multiple validation errors returned by +// Admin.ValidateAll() if the designated constraints aren't met. +type AdminMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AdminMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m AdminMultiError) AllErrors() []error { return m } + +// AdminValidationError is the validation error returned by Admin.Validate if +// the designated constraints aren't met. +type AdminValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdminValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdminValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AdminValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdminValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdminValidationError) ErrorName() string { return "AdminValidationError" } + +// Error satisfies the builtin error interface +func (e AdminValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdmin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdminValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdminValidationError{} + +// Validate checks the field values on ClusterManager with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterManagerMultiError, +// or nil if none found. 
+func (m *ClusterManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for LocalClusterName + + if all { + switch v := interface{}(m.GetOutlierDetection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamBindConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLoadStatsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadStatsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableDeferredClusterCreation + + if len(errors) > 0 { + return ClusterManagerMultiError(errors) + } + + return nil +} + +// ClusterManagerMultiError is an error wrapping multiple validation errors +// returned by ClusterManager.ValidateAll() if the designated constraints +// aren't met. +type ClusterManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ClusterManagerMultiError) AllErrors() []error { return m } + +// ClusterManagerValidationError is the validation error returned by +// ClusterManager.Validate if the designated constraints aren't met. +type ClusterManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterManagerValidationError) ErrorName() string { return "ClusterManagerValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterManagerValidationError{} + +// Validate checks the field values on Watchdogs with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Watchdogs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdogs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WatchdogsMultiError, or nil +// if none found. 
+func (m *Watchdogs) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdogs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMainThreadWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMainThreadWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWorkerWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkerWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WatchdogsMultiError(errors) + } + + return nil +} + +// WatchdogsMultiError is an error wrapping multiple validation errors returned +// by Watchdogs.ValidateAll() if the designated constraints aren't met. +type WatchdogsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchdogsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchdogsMultiError) AllErrors() []error { return m } + +// WatchdogsValidationError is the validation error returned by +// Watchdogs.Validate if the designated constraints aren't met. +type WatchdogsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchdogsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchdogsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchdogsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchdogsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WatchdogsValidationError) ErrorName() string { return "WatchdogsValidationError" } + +// Error satisfies the builtin error interface +func (e WatchdogsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdogs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchdogsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchdogsValidationError{} + +// Validate checks the field values on Watchdog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Watchdog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WatchdogMultiError, or nil +// if none found. +func (m *Watchdog) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMissTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMissTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMegamissTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else 
if v, ok := interface{}(m.GetMegamissTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKillTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKillTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetMaxKillTimeoutJitter(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = WatchdogValidationError{ + field: "MaxKillTimeoutJitter", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := WatchdogValidationError{ + field: "MaxKillTimeoutJitter", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetMultikillTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMultikillTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMultikillThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMultikillThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WatchdogMultiError(errors) + } + + return nil +} + +// WatchdogMultiError is an error wrapping multiple validation errors returned +// by Watchdog.ValidateAll() if the designated constraints aren't met. 
+type WatchdogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchdogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchdogMultiError) AllErrors() []error { return m } + +// WatchdogValidationError is the validation error returned by +// Watchdog.Validate if the designated constraints aren't met. +type WatchdogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchdogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchdogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchdogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchdogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WatchdogValidationError) ErrorName() string { return "WatchdogValidationError" } + +// Error satisfies the builtin error interface +func (e WatchdogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchdogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchdogValidationError{} + +// Validate checks the field values on FatalAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FatalAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FatalAction with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FatalActionMultiError, or +// nil if none found. +func (m *FatalAction) ValidateAll() error { + return m.validate(true) +} + +func (m *FatalAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FatalActionMultiError(errors) + } + + return nil +} + +// FatalActionMultiError is an error wrapping multiple validation errors +// returned by FatalAction.ValidateAll() if the designated constraints aren't met. 
+type FatalActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FatalActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FatalActionMultiError) AllErrors() []error { return m } + +// FatalActionValidationError is the validation error returned by +// FatalAction.Validate if the designated constraints aren't met. +type FatalActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FatalActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FatalActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FatalActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FatalActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FatalActionValidationError) ErrorName() string { return "FatalActionValidationError" } + +// Error satisfies the builtin error interface +func (e FatalActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFatalAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FatalActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FatalActionValidationError{} + +// Validate checks the field values on Runtime with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Runtime) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Runtime with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RuntimeMultiError, or nil if none found. 
+func (m *Runtime) ValidateAll() error { + return m.validate(true) +} + +func (m *Runtime) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SymlinkRoot + + // no validation rules for Subdirectory + + // no validation rules for OverrideSubdirectory + + if all { + switch v := interface{}(m.GetBase()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBase()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RuntimeMultiError(errors) + } + + return nil +} + +// RuntimeMultiError is an error wrapping multiple validation errors returned +// by Runtime.ValidateAll() if the designated constraints aren't met. +type RuntimeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeMultiError) AllErrors() []error { return m } + +// RuntimeValidationError is the validation error returned by Runtime.Validate +// if the designated constraints aren't met. +type RuntimeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeValidationError) ErrorName() string { return "RuntimeValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntime.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeValidationError{} + +// Validate checks the field values on RuntimeLayer with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeLayerMultiError, or +// nil if none found. 
+func (m *RuntimeLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RuntimeLayerValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofLayerSpecifierPresent := false + switch v := m.LayerSpecifier.(type) { + case *RuntimeLayer_StaticLayer: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStaticLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStaticLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_DiskLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDiskLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDiskLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_AdminLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAdminLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdminLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_RtdsLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRtdsLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRtdsLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLayerSpecifierPresent { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayerMultiError is an error wrapping multiple validation errors +// returned by RuntimeLayer.ValidateAll() if the designated constraints aren't met. +type RuntimeLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayerValidationError is the validation error returned by +// RuntimeLayer.Validate if the designated constraints aren't met. +type RuntimeLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayerValidationError) ErrorName() string { return "RuntimeLayerValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayerValidationError{} + +// Validate checks the field values on LayeredRuntime with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LayeredRuntime) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LayeredRuntime with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LayeredRuntimeMultiError, +// or nil if none found. +func (m *LayeredRuntime) ValidateAll() error { + return m.validate(true) +} + +func (m *LayeredRuntime) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetLayers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LayeredRuntimeMultiError(errors) + } + + return nil +} + +// LayeredRuntimeMultiError is an error wrapping multiple validation errors +// returned by LayeredRuntime.ValidateAll() if the designated constraints +// aren't met. +type LayeredRuntimeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LayeredRuntimeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LayeredRuntimeMultiError) AllErrors() []error { return m } + +// LayeredRuntimeValidationError is the validation error returned by +// LayeredRuntime.Validate if the designated constraints aren't met. +type LayeredRuntimeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LayeredRuntimeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LayeredRuntimeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LayeredRuntimeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LayeredRuntimeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LayeredRuntimeValidationError) ErrorName() string { return "LayeredRuntimeValidationError" } + +// Error satisfies the builtin error interface +func (e LayeredRuntimeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLayeredRuntime.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LayeredRuntimeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LayeredRuntimeValidationError{} + +// Validate checks the field values on CustomInlineHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomInlineHeader) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomInlineHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomInlineHeaderMultiError, or nil if none found. +func (m *CustomInlineHeader) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomInlineHeader) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetInlineHeaderName()) < 1 { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_CustomInlineHeader_InlineHeaderName_Pattern.MatchString(m.GetInlineHeaderName()) { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := CustomInlineHeader_InlineHeaderType_name[int32(m.GetInlineHeaderType())]; !ok { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomInlineHeaderMultiError(errors) + } + + return nil +} + +// CustomInlineHeaderMultiError is an error wrapping multiple validation errors +// returned by CustomInlineHeader.ValidateAll() if the designated constraints +// aren't met. +type CustomInlineHeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomInlineHeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomInlineHeaderMultiError) AllErrors() []error { return m } + +// CustomInlineHeaderValidationError is the validation error returned by +// CustomInlineHeader.Validate if the designated constraints aren't met. +type CustomInlineHeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e CustomInlineHeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomInlineHeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomInlineHeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomInlineHeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomInlineHeaderValidationError) ErrorName() string { + return "CustomInlineHeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomInlineHeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomInlineHeader.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomInlineHeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomInlineHeaderValidationError{} + +var _CustomInlineHeader_InlineHeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MemoryAllocatorManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MemoryAllocatorManagerMultiError, or nil if none found. +func (m *MemoryAllocatorManager) ValidateAll() error { + return m.validate(true) +} + +func (m *MemoryAllocatorManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesToRelease + + if all { + switch v := interface{}(m.GetMemoryReleaseInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryReleaseInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MemoryAllocatorManagerMultiError(errors) + } + + return nil +} + +// MemoryAllocatorManagerMultiError is an error wrapping multiple validation +// errors returned by MemoryAllocatorManager.ValidateAll() if the designated +// constraints aren't met. +type MemoryAllocatorManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MemoryAllocatorManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MemoryAllocatorManagerMultiError) AllErrors() []error { return m } + +// MemoryAllocatorManagerValidationError is the validation error returned by +// MemoryAllocatorManager.Validate if the designated constraints aren't met. +type MemoryAllocatorManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MemoryAllocatorManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MemoryAllocatorManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MemoryAllocatorManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MemoryAllocatorManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MemoryAllocatorManagerValidationError) ErrorName() string { + return "MemoryAllocatorManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e MemoryAllocatorManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMemoryAllocatorManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MemoryAllocatorManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MemoryAllocatorManagerValidationError{} + +// Validate checks the field values on Bootstrap_StaticResources with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_StaticResources) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_StaticResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Bootstrap_StaticResourcesMultiError, or nil if none found. 
+func (m *Bootstrap_StaticResources) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_StaticResources) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Bootstrap_StaticResourcesMultiError(errors) + } + + return nil +} + +// Bootstrap_StaticResourcesMultiError is an error wrapping multiple validation +// errors returned by Bootstrap_StaticResources.ValidateAll() if the +// designated constraints aren't met. +type Bootstrap_StaticResourcesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Bootstrap_StaticResourcesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_StaticResourcesMultiError) AllErrors() []error { return m } + +// Bootstrap_StaticResourcesValidationError is the validation error returned by +// Bootstrap_StaticResources.Validate if the designated constraints aren't met. +type Bootstrap_StaticResourcesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_StaticResourcesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_StaticResourcesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_StaticResourcesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_StaticResourcesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_StaticResourcesValidationError) ErrorName() string { + return "Bootstrap_StaticResourcesValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_StaticResourcesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_StaticResources.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_StaticResourcesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_StaticResourcesValidationError{} + +// Validate checks the field values on Bootstrap_DynamicResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_DynamicResources) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_DynamicResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Bootstrap_DynamicResourcesMultiError, or nil if none found. 
+func (m *Bootstrap_DynamicResources) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_DynamicResources) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for LdsResourcesLocator + + if all { + switch v := interface{}(m.GetCdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CdsResourcesLocator + + if all { + switch v := interface{}(m.GetAdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Bootstrap_DynamicResourcesMultiError(errors) + } + + return nil +} + +// Bootstrap_DynamicResourcesMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_DynamicResources.ValidateAll() if +// the designated constraints aren't met. +type Bootstrap_DynamicResourcesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_DynamicResourcesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Bootstrap_DynamicResourcesMultiError) AllErrors() []error { return m } + +// Bootstrap_DynamicResourcesValidationError is the validation error returned +// by Bootstrap_DynamicResources.Validate if the designated constraints aren't met. +type Bootstrap_DynamicResourcesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_DynamicResourcesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_DynamicResourcesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_DynamicResourcesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_DynamicResourcesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_DynamicResourcesValidationError) ErrorName() string { + return "Bootstrap_DynamicResourcesValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_DynamicResourcesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_DynamicResources.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_DynamicResourcesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_DynamicResourcesValidationError{} + +// Validate checks the field values on Bootstrap_ApplicationLogConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_ApplicationLogConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_ApplicationLogConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Bootstrap_ApplicationLogConfigMultiError, or nil if none found. 
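The Validate/ValidateAll pair generated above follows the usual protoc-gen-validate shape: Validate stops at the first violation, while ValidateAll collects every violation into the slice-backed MultiError type. The following sketch is illustrative only (not part of the vendored file) and shows how a caller might unwrap that result for Bootstrap_DynamicResources; the function name, the res parameter, and the fmt import are assumptions.

func reportDynamicResourceViolations(res *Bootstrap_DynamicResources) {
	err := res.ValidateAll()
	if err == nil {
		return
	}
	// ValidateAll returns the concrete Bootstrap_DynamicResourcesMultiError
	// slice when any rule fails, so a type assertion recovers each violation.
	if multi, ok := err.(Bootstrap_DynamicResourcesMultiError); ok {
		for _, violation := range multi.AllErrors() {
			fmt.Printf("dynamic_resources violation: %v\n", violation)
		}
		return
	}
	fmt.Printf("dynamic_resources invalid: %v\n", err)
}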
+func (m *Bootstrap_ApplicationLogConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_ApplicationLogConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLogFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Bootstrap_ApplicationLogConfigMultiError(errors) + } + + return nil +} + +// Bootstrap_ApplicationLogConfigMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_ApplicationLogConfig.ValidateAll() +// if the designated constraints aren't met. +type Bootstrap_ApplicationLogConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_ApplicationLogConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_ApplicationLogConfigMultiError) AllErrors() []error { return m } + +// Bootstrap_ApplicationLogConfigValidationError is the validation error +// returned by Bootstrap_ApplicationLogConfig.Validate if the designated +// constraints aren't met. +type Bootstrap_ApplicationLogConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_ApplicationLogConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_ApplicationLogConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_ApplicationLogConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_ApplicationLogConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Bootstrap_ApplicationLogConfigValidationError) ErrorName() string { + return "Bootstrap_ApplicationLogConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_ApplicationLogConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_ApplicationLogConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_ApplicationLogConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_ApplicationLogConfigValidationError{} + +// Validate checks the field values on Bootstrap_DeferredStatOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_DeferredStatOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_DeferredStatOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Bootstrap_DeferredStatOptionsMultiError, or nil if none found. +func (m *Bootstrap_DeferredStatOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_DeferredStatOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EnableDeferredCreationStats + + if len(errors) > 0 { + return Bootstrap_DeferredStatOptionsMultiError(errors) + } + + return nil +} + +// Bootstrap_DeferredStatOptionsMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_DeferredStatOptions.ValidateAll() +// if the designated constraints aren't met. +type Bootstrap_DeferredStatOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_DeferredStatOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_DeferredStatOptionsMultiError) AllErrors() []error { return m } + +// Bootstrap_DeferredStatOptionsValidationError is the validation error +// returned by Bootstrap_DeferredStatOptions.Validate if the designated +// constraints aren't met. +type Bootstrap_DeferredStatOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_DeferredStatOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_DeferredStatOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_DeferredStatOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_DeferredStatOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Bootstrap_DeferredStatOptionsValidationError) ErrorName() string { + return "Bootstrap_DeferredStatOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_DeferredStatOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_DeferredStatOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_DeferredStatOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_DeferredStatOptionsValidationError{} + +// Validate checks the field values on Bootstrap_GrpcAsyncClientManagerConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Bootstrap_GrpcAsyncClientManagerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Bootstrap_GrpcAsyncClientManagerConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Bootstrap_GrpcAsyncClientManagerConfigMultiError, or nil if none found. +func (m *Bootstrap_GrpcAsyncClientManagerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetMaxCachedEntryIdleDuration(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Bootstrap_GrpcAsyncClientManagerConfigValidationError{ + field: "MaxCachedEntryIdleDuration", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(5*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := Bootstrap_GrpcAsyncClientManagerConfigValidationError{ + field: "MaxCachedEntryIdleDuration", + reason: "value must be greater than or equal to 5s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Bootstrap_GrpcAsyncClientManagerConfigMultiError(errors) + } + + return nil +} + +// Bootstrap_GrpcAsyncClientManagerConfigMultiError is an error wrapping +// multiple validation errors returned by +// Bootstrap_GrpcAsyncClientManagerConfig.ValidateAll() if the designated +// constraints aren't met. +type Bootstrap_GrpcAsyncClientManagerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) AllErrors() []error { return m } + +// Bootstrap_GrpcAsyncClientManagerConfigValidationError is the validation +// error returned by Bootstrap_GrpcAsyncClientManagerConfig.Validate if the +// designated constraints aren't met. +type Bootstrap_GrpcAsyncClientManagerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
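The validate method above enforces the only constraint on Bootstrap_GrpcAsyncClientManagerConfig: MaxCachedEntryIdleDuration, when set, must be a well-formed duration of at least 5s. A minimal sketch of that behavior follows; it is not part of the vendored file, the function name is hypothetical, and the fmt, time, and google.golang.org/protobuf/types/known/durationpb imports are assumed.

func exampleGrpcAsyncClientManagerConfig() {
	cfg := &Bootstrap_GrpcAsyncClientManagerConfig{
		// 2s is below the generated >= 5s rule, so Validate reports a violation.
		MaxCachedEntryIdleDuration: durationpb.New(2 * time.Second),
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // reason: "value must be greater than or equal to 5s"
	}
}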
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) ErrorName() string { + return "Bootstrap_GrpcAsyncClientManagerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_GrpcAsyncClientManagerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_GrpcAsyncClientManagerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_GrpcAsyncClientManagerConfigValidationError{} + +// Validate checks the field values on Bootstrap_ApplicationLogConfig_LogFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Bootstrap_ApplicationLogConfig_LogFormat) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Bootstrap_ApplicationLogConfig_LogFormat with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Bootstrap_ApplicationLogConfig_LogFormatMultiError, or nil if none found. 
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofLogFormatPresent := false + switch v := m.LogFormat.(type) { + case *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat: + if v == nil { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLogFormatPresent = true + + if all { + switch v := interface{}(m.GetJsonFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat: + if v == nil { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLogFormatPresent = true + // no validation rules for TextFormat + default: + _ = v // ensures v is used + } + if !oneofLogFormatPresent { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Bootstrap_ApplicationLogConfig_LogFormatMultiError(errors) + } + + return nil +} + +// Bootstrap_ApplicationLogConfig_LogFormatMultiError is an error wrapping +// multiple validation errors returned by +// Bootstrap_ApplicationLogConfig_LogFormat.ValidateAll() if the designated +// constraints aren't met. +type Bootstrap_ApplicationLogConfig_LogFormatMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) AllErrors() []error { return m } + +// Bootstrap_ApplicationLogConfig_LogFormatValidationError is the validation +// error returned by Bootstrap_ApplicationLogConfig_LogFormat.Validate if the +// designated constraints aren't met. +type Bootstrap_ApplicationLogConfig_LogFormatValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
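The oneof handling above requires the LogFormat oneof to be populated and rejects a typed-nil wrapper; the JsonFormat branch additionally validates the embedded message when it exposes a Validate method. A small sketch, not part of the vendored file, with the function name and format string chosen arbitrarily and the fmt import assumed:

func exampleApplicationLogFormat() {
	// No branch of the oneof is set, so Validate reports LogFormat "value is required".
	empty := &Bootstrap_ApplicationLogConfig_LogFormat{}
	fmt.Println(empty.Validate())

	// The TextFormat branch carries no further rules, so this passes.
	text := &Bootstrap_ApplicationLogConfig_LogFormat{
		LogFormat: &Bootstrap_ApplicationLogConfig_LogFormat_TextFormat{
			TextFormat: "%v",
		},
	}
	fmt.Println(text.Validate()) // <nil>
}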
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) ErrorName() string { + return "Bootstrap_ApplicationLogConfig_LogFormatValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_ApplicationLogConfig_LogFormat.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_ApplicationLogConfig_LogFormatValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_ApplicationLogConfig_LogFormatValidationError{} + +// Validate checks the field values on ClusterManager_OutlierDetection with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterManager_OutlierDetection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterManager_OutlierDetection with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClusterManager_OutlierDetectionMultiError, or nil if none found. +func (m *ClusterManager_OutlierDetection) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterManager_OutlierDetection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EventLogPath + + if all { + switch v := interface{}(m.GetEventService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterManager_OutlierDetectionMultiError(errors) + } + + return nil +} + +// ClusterManager_OutlierDetectionMultiError is an error wrapping multiple +// validation errors returned by ClusterManager_OutlierDetection.ValidateAll() +// if the designated constraints aren't met. +type ClusterManager_OutlierDetectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterManager_OutlierDetectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterManager_OutlierDetectionMultiError) AllErrors() []error { return m } + +// ClusterManager_OutlierDetectionValidationError is the validation error +// returned by ClusterManager_OutlierDetection.Validate if the designated +// constraints aren't met. +type ClusterManager_OutlierDetectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterManager_OutlierDetectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterManager_OutlierDetectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterManager_OutlierDetectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterManager_OutlierDetectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterManager_OutlierDetectionValidationError) ErrorName() string { + return "ClusterManager_OutlierDetectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterManager_OutlierDetectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterManager_OutlierDetection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterManager_OutlierDetectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterManager_OutlierDetectionValidationError{} + +// Validate checks the field values on Watchdog_WatchdogAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Watchdog_WatchdogAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdog_WatchdogAction with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Watchdog_WatchdogActionMultiError, or nil if none found. 
+func (m *Watchdog_WatchdogAction) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdog_WatchdogAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := Watchdog_WatchdogAction_WatchdogEvent_name[int32(m.GetEvent())]; !ok { + err := Watchdog_WatchdogActionValidationError{ + field: "Event", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Watchdog_WatchdogActionMultiError(errors) + } + + return nil +} + +// Watchdog_WatchdogActionMultiError is an error wrapping multiple validation +// errors returned by Watchdog_WatchdogAction.ValidateAll() if the designated +// constraints aren't met. +type Watchdog_WatchdogActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Watchdog_WatchdogActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Watchdog_WatchdogActionMultiError) AllErrors() []error { return m } + +// Watchdog_WatchdogActionValidationError is the validation error returned by +// Watchdog_WatchdogAction.Validate if the designated constraints aren't met. +type Watchdog_WatchdogActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Watchdog_WatchdogActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Watchdog_WatchdogActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Watchdog_WatchdogActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Watchdog_WatchdogActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
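The validate method above checks Watchdog_WatchdogAction.Event against the generated Watchdog_WatchdogAction_WatchdogEvent_name table, so any numeric value outside the defined enum fails. A sketch, not part of the vendored file, with a hypothetical function name, an arbitrary out-of-range value of 999, and the fmt import assumed:

func exampleWatchdogActionEvent() {
	action := &Watchdog_WatchdogAction{
		// 999 is not a defined WatchdogEvent value, so Validate rejects it.
		Event: Watchdog_WatchdogAction_WatchdogEvent(999),
	}
	if err := action.Validate(); err != nil {
		fmt.Println(err) // reason: "value must be one of the defined enum values"
	}
}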
+func (e Watchdog_WatchdogActionValidationError) ErrorName() string { + return "Watchdog_WatchdogActionValidationError" +} + +// Error satisfies the builtin error interface +func (e Watchdog_WatchdogActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdog_WatchdogAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Watchdog_WatchdogActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Watchdog_WatchdogActionValidationError{} + +// Validate checks the field values on RuntimeLayer_DiskLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_DiskLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_DiskLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_DiskLayerMultiError, or nil if none found. +func (m *RuntimeLayer_DiskLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_DiskLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SymlinkRoot + + // no validation rules for Subdirectory + + // no validation rules for AppendServiceCluster + + if len(errors) > 0 { + return RuntimeLayer_DiskLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_DiskLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_DiskLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_DiskLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_DiskLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_DiskLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_DiskLayerValidationError is the validation error returned by +// RuntimeLayer_DiskLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_DiskLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_DiskLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayer_DiskLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_DiskLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_DiskLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayer_DiskLayerValidationError) ErrorName() string { + return "RuntimeLayer_DiskLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_DiskLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_DiskLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_DiskLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_DiskLayerValidationError{} + +// Validate checks the field values on RuntimeLayer_AdminLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_AdminLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_AdminLayer with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_AdminLayerMultiError, or nil if none found. +func (m *RuntimeLayer_AdminLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_AdminLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RuntimeLayer_AdminLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_AdminLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_AdminLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_AdminLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_AdminLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_AdminLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_AdminLayerValidationError is the validation error returned by +// RuntimeLayer_AdminLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_AdminLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_AdminLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayer_AdminLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_AdminLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_AdminLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayer_AdminLayerValidationError) ErrorName() string { + return "RuntimeLayer_AdminLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_AdminLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_AdminLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_AdminLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_AdminLayerValidationError{} + +// Validate checks the field values on RuntimeLayer_RtdsLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_RtdsLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_RtdsLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_RtdsLayerMultiError, or nil if none found. +func (m *RuntimeLayer_RtdsLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_RtdsLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetRtdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRtdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RuntimeLayer_RtdsLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_RtdsLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_RtdsLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_RtdsLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_RtdsLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_RtdsLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_RtdsLayerValidationError is the validation error returned by +// RuntimeLayer_RtdsLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_RtdsLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_RtdsLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RuntimeLayer_RtdsLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_RtdsLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_RtdsLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeLayer_RtdsLayerValidationError) ErrorName() string { + return "RuntimeLayer_RtdsLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_RtdsLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_RtdsLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_RtdsLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_RtdsLayerValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go new file mode 100644 index 000000000..51e10e0e0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go @@ -0,0 +1,3128 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Bootstrap_StaticResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_StaticResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StaticResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Secrets[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secrets[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Clusters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Clusters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Listeners) > 0 { + for iNdEx := len(m.Listeners) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Listeners[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Listeners[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DynamicResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DynamicResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DynamicResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CdsResourcesLocator) > 0 { + i -= len(m.CdsResourcesLocator) + copy(dAtA[i:], m.CdsResourcesLocator) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CdsResourcesLocator))) + i-- + dAtA[i] = 0x32 + } + if len(m.LdsResourcesLocator) > 0 { + i -= len(m.LdsResourcesLocator) + copy(dAtA[i:], m.LdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LdsResourcesLocator))) + i-- + dAtA[i] = 0x2a + } + if m.AdsConfig != nil { + if vtmsg, ok := interface{}(m.AdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.CdsConfig != nil { + if vtmsg, ok := interface{}(m.CdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.LdsConfig != nil { + if vtmsg, ok := interface{}(m.LdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := 
(*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LogFormat != nil { + size, err := m.LogFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredCreationStats { + i-- + if m.EnableDeferredCreationStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCachedEntryIdleDuration != nil { + size, err := 
(*durationpb.Duration)(m.MaxCachedEntryIdleDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryAllocatorManager != nil { + size, err := m.MemoryAllocatorManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.GrpcAsyncClientManagerConfig != nil { + size, err := m.GrpcAsyncClientManagerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.DeferredStatOptions != nil { + size, err := m.DeferredStatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.ApplicationLogConfig != nil { + size, err := m.ApplicationLogConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.ListenerManager != nil { + if vtmsg, ok := interface{}(m.ListenerManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if m.XdsConfigTrackerExtension != nil { + if vtmsg, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.XdsConfigTrackerExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.XdsDelegateExtension != nil { + if vtmsg, ok := interface{}(m.XdsDelegateExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.XdsDelegateExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.DefaultRegexEngine != nil { + if vtmsg, ok := interface{}(m.DefaultRegexEngine).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultRegexEngine) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.PerfTracingFilePath) > 0 { + i -= len(m.PerfTracingFilePath) + copy(dAtA[i:], m.PerfTracingFilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PerfTracingFilePath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.InlineHeaders) > 0 { + for iNdEx := len(m.InlineHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if msg, ok := m.StatsFlush.(*Bootstrap_StatsFlushOnAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.FatalActions) > 0 { + for iNdEx := len(m.FatalActions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FatalActions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.Watchdogs != nil { + size, err := m.Watchdogs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.NodeContextParams) > 0 { + for iNdEx := len(m.NodeContextParams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NodeContextParams[iNdEx]) + copy(dAtA[i:], 
m.NodeContextParams[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NodeContextParams[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.CertificateProviderInstances) > 0 { + for k := range m.CertificateProviderInstances { + v := m.CertificateProviderInstances[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.DefaultSocketInterface) > 0 { + i -= len(m.DefaultSocketInterface) + copy(dAtA[i:], m.DefaultSocketInterface) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultSocketInterface))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.DefaultConfigSource != nil { + if vtmsg, ok := interface{}(m.DefaultConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.ConfigSources) > 0 { + for iNdEx := len(m.ConfigSources) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ConfigSources[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSources[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if len(m.BootstrapExtensions) > 0 { + for iNdEx := len(m.BootstrapExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.BootstrapExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BootstrapExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.StatsServerVersionOverride != nil { + size, err := (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.HeaderPrefix) > 0 { + i -= len(m.HeaderPrefix) + copy(dAtA[i:], m.HeaderPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.LayeredRuntime != nil { + size, err := m.LayeredRuntime.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.EnableDispatcherStats { + i-- + if m.EnableDispatcherStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.OverloadManager != nil { + if vtmsg, ok := interface{}(m.OverloadManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverloadManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } + if m.HdsConfig != nil { + if vtmsg, ok := interface{}(m.HdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StatsConfig != nil { + if vtmsg, ok := interface{}(m.StatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.Admin != nil { + size, err := m.Admin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Tracing != nil { + if vtmsg, ok := interface{}(m.Tracing).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Tracing) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Watchdog != nil { + size, err := m.Watchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.StatsFlushInterval != nil { + size, err := (*durationpb.Duration)(m.StatsFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.StatsSinks) > 0 { + for iNdEx := len(m.StatsSinks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.StatsSinks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsSinks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.FlagsPath) > 0 { + i -= len(m.FlagsPath) + copy(dAtA[i:], m.FlagsPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FlagsPath))) + i-- + dAtA[i] = 0x2a + } + if m.ClusterManager != nil { + size, err := m.ClusterManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DynamicResources != nil { + size, err := m.DynamicResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.StaticResources != nil { + size, err := m.StaticResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StatsFlushOnAdmin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + return len(dAtA) - i, nil +} +func (m *Admin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Admin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Admin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := 
interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ProfilePath) > 0 { + i -= len(m.ProfilePath) + copy(dAtA[i:], m.ProfilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfilePath))) + i-- + dAtA[i] = 0x12 + } + if len(m.AccessLogPath) > 0 { + i -= len(m.AccessLogPath) + copy(dAtA[i:], m.AccessLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager_OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager_OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager_OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EventService != nil { + if vtmsg, ok := interface{}(m.EventService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EventService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredClusterCreation { + i-- + if m.EnableDeferredClusterCreation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LoadStatsConfig != nil { + if vtmsg, ok := interface{}(m.LoadStatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadStatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.LocalClusterName) > 0 { + i -= len(m.LocalClusterName) + copy(dAtA[i:], m.LocalClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LocalClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdogs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdogs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdogs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WorkerWatchdog != nil { + size, err := m.WorkerWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MainThreadWatchdog != nil { + size, err := 
m.MainThreadWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog_WatchdogAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog_WatchdogAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog_WatchdogAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Event != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Event)) + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.MaxKillTimeoutJitter != nil { + size, err := (*durationpb.Duration)(m.MaxKillTimeoutJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.MultikillThreshold != nil { + if vtmsg, ok := interface{}(m.MultikillThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MultikillThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.MultikillTimeout != nil { + size, err := (*durationpb.Duration)(m.MultikillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.KillTimeout != nil { + size, err := (*durationpb.Duration)(m.KillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MegamissTimeout != nil { + size, err := (*durationpb.Duration)(m.MegamissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MissTimeout != nil { + size, err := (*durationpb.Duration)(m.MissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FatalAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FatalAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FatalAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Runtime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Runtime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Runtime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Base != nil { + size, err := (*structpb.Struct)(m.Base).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.OverrideSubdirectory) > 0 { + i -= len(m.OverrideSubdirectory) + copy(dAtA[i:], m.OverrideSubdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideSubdirectory))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x12 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + 
copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x1a + } + if m.AppendServiceCluster { + i-- + if m.AppendServiceCluster { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RtdsConfig != nil { + if vtmsg, ok := interface{}(m.RtdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RtdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_RtdsLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_AdminLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_DiskLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_StaticLayer); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_StaticLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_StaticLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StaticLayer != nil { + size, err := (*structpb.Struct)(m.StaticLayer).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_DiskLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DiskLayer != nil { + size, err := m.DiskLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_AdminLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AdminLayer != nil { + size, err := m.AdminLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_RtdsLayer_) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RtdsLayer != nil { + size, err := m.RtdsLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *LayeredRuntime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LayeredRuntime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LayeredRuntime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Layers) > 0 { + for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Layers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomInlineHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomInlineHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomInlineHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InlineHeaderType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InlineHeaderType)) + i-- + dAtA[i] = 0x10 + } + if len(m.InlineHeaderName) > 0 { + i -= len(m.InlineHeaderName) + copy(dAtA[i:], m.InlineHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineHeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemoryAllocatorManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryAllocatorManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MemoryAllocatorManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryReleaseInterval != nil { + size, err := (*durationpb.Duration)(m.MemoryReleaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BytesToRelease != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesToRelease)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StaticResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Listeners) > 0 { + for _, e := range m.Listeners { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DynamicResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LdsConfig != nil { + if size, ok := interface{}(m.LdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CdsConfig != nil { + if size, ok := interface{}(m.CdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdsConfig != nil { + if size, ok := interface{}(m.AdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LogFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Bootstrap_ApplicationLogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogFormat != nil { + l = m.LogFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DeferredStatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableDeferredCreationStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.MaxCachedEntryIdleDuration != nil { + l = (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StaticResources != nil { + l = m.StaticResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicResources != nil { + l = m.DynamicResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterManager != nil { + l = m.ClusterManager.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.FlagsPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StatsSinks) > 0 { + for _, e := range m.StatsSinks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StatsFlushInterval != nil { + l = (*durationpb.Duration)(m.StatsFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Watchdog != nil { + l = m.Watchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + if size, ok := interface{}(m.Tracing).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Tracing) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Admin != nil { + l = m.Admin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsConfig != nil { + if size, ok := interface{}(m.StatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HdsConfig != nil { + if size, ok := interface{}(m.HdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverloadManager != nil { + if size, ok := interface{}(m.OverloadManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverloadManager) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDispatcherStats { + n += 3 + } + if m.LayeredRuntime != nil { + l = m.LayeredRuntime.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HeaderPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsServerVersionOverride != nil { + l = (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if len(m.BootstrapExtensions) > 0 { + for _, e := range m.BootstrapExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ConfigSources) > 0 { + for _, e := range m.ConfigSources { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DefaultConfigSource != nil { + if size, ok := 
interface{}(m.DefaultConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultConfigSource) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultSocketInterface) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CertificateProviderInstances) > 0 { + for k, v := range m.CertificateProviderInstances { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.NodeContextParams) > 0 { + for _, s := range m.NodeContextParams { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Watchdogs != nil { + l = m.Watchdogs.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FatalActions) > 0 { + for _, e := range m.FatalActions { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.StatsFlush.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InlineHeaders) > 0 { + for _, e := range m.InlineHeaders { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.PerfTracingFilePath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultRegexEngine != nil { + if size, ok := interface{}(m.DefaultRegexEngine).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultRegexEngine) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsDelegateExtension != nil { + if size, ok := interface{}(m.XdsDelegateExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsDelegateExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfigTrackerExtension != nil { + if size, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsConfigTrackerExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ListenerManager != nil { + if size, ok := interface{}(m.ListenerManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerManager) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationLogConfig != nil { + l = m.ApplicationLogConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeferredStatOptions != nil { + l = m.DeferredStatOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcAsyncClientManagerConfig != nil { + l = m.GrpcAsyncClientManagerConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemoryAllocatorManager != nil { + l = 
m.MemoryAllocatorManager.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_StatsFlushOnAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *Admin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ProfilePath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreGlobalConnLimit { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager_OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EventLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + if size, ok := interface{}(m.EventService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EventService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LocalClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadStatsConfig != nil { + if size, ok := interface{}(m.LoadStatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadStatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDeferredClusterCreation { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdogs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MainThreadWatchdog != nil { + l = m.MainThreadWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WorkerWatchdog != nil { + l = m.WorkerWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdog_WatchdogAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Event != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Event)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Watchdog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MissTimeout != nil { + l = (*durationpb.Duration)(m.MissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MegamissTimeout != nil { + l = (*durationpb.Duration)(m.MegamissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KillTimeout != nil { + l = (*durationpb.Duration)(m.KillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillTimeout != nil { + l = (*durationpb.Duration)(m.MultikillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillThreshold != nil { + if size, ok := interface{}(m.MultikillThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MultikillThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxKillTimeoutJitter != nil { + l = (*durationpb.Duration)(m.MaxKillTimeoutJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FatalAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Runtime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OverrideSubdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Base != nil { + l = (*structpb.Struct)(m.Base).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_DiskLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendServiceCluster { + n += 2 + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_AdminLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_RtdsLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RtdsConfig != nil { + if size, ok := interface{}(m.RtdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RtdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LayerSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_StaticLayer) SizeVT() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.StaticLayer != nil { + l = (*structpb.Struct)(m.StaticLayer).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_DiskLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DiskLayer != nil { + l = m.DiskLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_AdminLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AdminLayer != nil { + l = m.AdminLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_RtdsLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RtdsLayer != nil { + l = m.RtdsLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LayeredRuntime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 { + for _, e := range m.Layers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CustomInlineHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InlineHeaderType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.InlineHeaderType)) + } + n += len(m.unknownFields) + return n +} + +func (m *MemoryAllocatorManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesToRelease != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesToRelease)) + } + if m.MemoryReleaseInterval != nil { + l = (*durationpb.Duration)(m.MemoryReleaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go new file mode 100644 index 000000000..cffadb9d8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go @@ -0,0 +1,507 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. 
+type CircuitBreakers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. + Thresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,1,rep,name=thresholds,proto3" json:"thresholds,omitempty"` + // Optional per-host limits which apply to each individual host in a cluster. + // + // .. note:: + // + // currently only the :ref:`max_connections + // ` field is supported for per-host limits. + // + // If multiple per-host :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no per-host Thresholds are defined for a given + // :ref:`RoutingPriority`, + // the cluster will not have per-host limits. + PerHostThresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,2,rep,name=per_host_thresholds,json=perHostThresholds,proto3" json:"per_host_thresholds,omitempty"` +} + +func (x *CircuitBreakers) Reset() { + *x = CircuitBreakers{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers) ProtoMessage() {} + +func (x *CircuitBreakers) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers.ProtoReflect.Descriptor instead. +func (*CircuitBreakers) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0} +} + +func (x *CircuitBreakers) GetThresholds() []*CircuitBreakers_Thresholds { + if x != nil { + return x.Thresholds + } + return nil +} + +func (x *CircuitBreakers) GetPerHostThresholds() []*CircuitBreakers_Thresholds { + if x != nil { + return x.PerHostThresholds + } + return nil +} + +// A Thresholds defines CircuitBreaker settings for a +// :ref:`RoutingPriority`. +// [#next-free-field: 9] +type CircuitBreakers_Thresholds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + Priority v3.RoutingPriority `protobuf:"varint,1,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"` + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + MaxConnections *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + // This limit is applied as a connection limit for non-HTTP traffic. 
+ MaxPendingRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + // This limit does not apply to non-HTTP traffic. + MaxRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"` + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + MaxRetries *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget *CircuitBreakers_Thresholds_RetryBudget `protobuf:"bytes,8,opt,name=retry_budget,json=retryBudget,proto3" json:"retry_budget,omitempty"` + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. + TrackRemaining bool `protobuf:"varint,6,opt,name=track_remaining,json=trackRemaining,proto3" json:"track_remaining,omitempty"` + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. + MaxConnectionPools *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"` +} + +func (x *CircuitBreakers_Thresholds) Reset() { + *x = CircuitBreakers_Thresholds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers_Thresholds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers_Thresholds) ProtoMessage() {} + +func (x *CircuitBreakers_Thresholds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers_Thresholds.ProtoReflect.Descriptor instead. 
+func (*CircuitBreakers_Thresholds) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CircuitBreakers_Thresholds) GetPriority() v3.RoutingPriority { + if x != nil { + return x.Priority + } + return v3.RoutingPriority(0) +} + +func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnections + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxPendingRequests + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequests + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRetries + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetRetryBudget() *CircuitBreakers_Thresholds_RetryBudget { + if x != nil { + return x.RetryBudget + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetTrackRemaining() bool { + if x != nil { + return x.TrackRemaining + } + return false +} + +func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnectionPools + } + return nil +} + +type CircuitBreakers_Thresholds_RetryBudget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. + BudgetPercent *v31.Percent `protobuf:"bytes,1,opt,name=budget_percent,json=budgetPercent,proto3" json:"budget_percent,omitempty"` + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + MinRetryConcurrency *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"` +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) Reset() { + *x = CircuitBreakers_Thresholds_RetryBudget{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers_Thresholds_RetryBudget) ProtoMessage() {} + +func (x *CircuitBreakers_Thresholds_RetryBudget) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers_Thresholds_RetryBudget.ProtoReflect.Descriptor instead. 
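The generated CircuitBreakers, CircuitBreakers_Thresholds, and CircuitBreakers_Thresholds_RetryBudget types above are plain protobuf message structs; a minimal sketch of assembling one from application code follows. The numeric values and the import aliases (clusterv3, corev3, typev3) are illustrative assumptions for the example, not anything this diff prescribes:

package example

import (
	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

// buildCircuitBreakers assembles a single-threshold CircuitBreakers message
// with a retry budget, using only fields defined in the generated struct above.
func buildCircuitBreakers() *clusterv3.CircuitBreakers {
	return &clusterv3.CircuitBreakers{
		Thresholds: []*clusterv3.CircuitBreakers_Thresholds{{
			Priority:       corev3.RoutingPriority(0), // enum value 0, the proto default priority
			MaxConnections: wrapperspb.UInt32(2048),   // raises the documented 1024 default
			MaxRetries:     wrapperspb.UInt32(5),
			TrackRemaining: true,
			RetryBudget: &clusterv3.CircuitBreakers_Thresholds_RetryBudget{
				BudgetPercent:       &typev3.Percent{Value: 25}, // documented default is 20%
				MinRetryConcurrency: wrapperspb.UInt32(3),       // documented default is 3
			},
		}},
	}
}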
+func (*CircuitBreakers_Thresholds_RetryBudget) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) GetBudgetPercent() *v31.Percent { + if x != nil { + return x.BudgetPercent + } + return nil +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrapperspb.UInt32Value { + if x != nil { + return x.MinRetryConcurrency + } + return nil +} + +var File_envoy_config_cluster_v3_circuit_breaker_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xe5, 0x08, 0x0a, 0x0f, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, + 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, + 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x0a, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x63, 0x0a, 0x13, 0x70, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x11, 0x70, 0x65, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x1a, + 0xea, 0x06, 0x0a, 0x0a, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 
0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4b, + 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x6d, + 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, + 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, + 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 
0x73, 0x1a, + 0xe2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, + 0x3d, 0x0a, 0x0e, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x0d, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x50, + 0x0a, 0x15, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6d, 0x69, 0x6e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, + 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, + 0x64, 0x67, 0x65, 0x74, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x43, 0x69, 0x72, + 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc +) + +func file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData) + }) + return 
file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData +} + +var file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = []interface{}{ + (*CircuitBreakers)(nil), // 0: envoy.config.cluster.v3.CircuitBreakers + (*CircuitBreakers_Thresholds)(nil), // 1: envoy.config.cluster.v3.CircuitBreakers.Thresholds + (*CircuitBreakers_Thresholds_RetryBudget)(nil), // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget + (v3.RoutingPriority)(0), // 3: envoy.config.core.v3.RoutingPriority + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*v31.Percent)(nil), // 5: envoy.type.v3.Percent +} +var file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.CircuitBreakers.thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds + 1, // 1: envoy.config.cluster.v3.CircuitBreakers.per_host_thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds + 3, // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.priority:type_name -> envoy.config.core.v3.RoutingPriority + 4, // 3: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connections:type_name -> google.protobuf.UInt32Value + 4, // 4: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests:type_name -> google.protobuf.UInt32Value + 4, // 5: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_requests:type_name -> google.protobuf.UInt32Value + 4, // 6: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_retries:type_name -> google.protobuf.UInt32Value + 2, // 7: envoy.config.cluster.v3.CircuitBreakers.Thresholds.retry_budget:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget + 4, // 8: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connection_pools:type_name -> google.protobuf.UInt32Value + 5, // 9: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.budget_percent:type_name -> envoy.type.v3.Percent + 4, // 10: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.min_retry_concurrency:type_name -> google.protobuf.UInt32Value + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_circuit_breaker_proto_init() } +func file_envoy_config_cluster_v3_circuit_breaker_proto_init() { + if File_envoy_config_cluster_v3_circuit_breaker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers_Thresholds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers_Thresholds_RetryBudget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_circuit_breaker_proto = out.File + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = nil + file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = nil + file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go new file mode 100644 index 000000000..8bf3373be --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go @@ -0,0 +1,662 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RoutingPriority(0) +) + +// Validate checks the field values on CircuitBreakers with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CircuitBreakers) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CircuitBreakers with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CircuitBreakersMultiError, or nil if none found. 
+func (m *CircuitBreakers) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetThresholds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetPerHostThresholds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CircuitBreakersMultiError(errors) + } + + return nil +} + +// CircuitBreakersMultiError is an error wrapping multiple validation errors +// returned by CircuitBreakers.ValidateAll() if the designated constraints +// aren't met. +type CircuitBreakersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakersMultiError) AllErrors() []error { return m } + +// CircuitBreakersValidationError is the validation error returned by +// CircuitBreakers.Validate if the designated constraints aren't met. +type CircuitBreakersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CircuitBreakersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
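The Validate and ValidateAll methods generated in this file are typically consumed as in the sketch below. The reportViolations helper and the use of errors.As are illustrative assumptions; ValidateAll, CircuitBreakersMultiError.AllErrors, Field, and Reason are the signatures defined in this file:

package example

import (
	"errors"
	"log"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

// reportViolations runs the exhaustive validator and logs every violation,
// rather than stopping at the first error as Validate would.
func reportViolations(cb *clusterv3.CircuitBreakers) {
	err := cb.ValidateAll()
	if err == nil {
		return
	}
	var multi clusterv3.CircuitBreakersMultiError
	if errors.As(err, &multi) {
		for _, verr := range multi.AllErrors() {
			var v clusterv3.CircuitBreakersValidationError
			if errors.As(verr, &v) {
				log.Printf("field %s: %s", v.Field(), v.Reason())
				continue
			}
			log.Print(verr)
		}
		return
	}
	log.Print(err)
}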
+func (e CircuitBreakersValidationError) ErrorName() string { return "CircuitBreakersValidationError" } + +// Error satisfies the builtin error interface +func (e CircuitBreakersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakersValidationError{} + +// Validate checks the field values on CircuitBreakers_Thresholds with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CircuitBreakers_Thresholds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CircuitBreakers_Thresholds with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CircuitBreakers_ThresholdsMultiError, or nil if none found. +func (m *CircuitBreakers_Thresholds) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers_Thresholds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok { + err := CircuitBreakers_ThresholdsValidationError{ + field: "Priority", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMaxConnections()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnections()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxPendingRequests()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxPendingRequests()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxRequests()).(type) { + 
case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequests()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryBudget()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBudget()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrackRemaining + + if all { + switch v := interface{}(m.GetMaxConnectionPools()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnectionPools()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CircuitBreakers_ThresholdsMultiError(errors) + } + + return nil +} + +// CircuitBreakers_ThresholdsMultiError is an error wrapping multiple +// validation 
errors returned by CircuitBreakers_Thresholds.ValidateAll() if +// the designated constraints aren't met. +type CircuitBreakers_ThresholdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakers_ThresholdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakers_ThresholdsMultiError) AllErrors() []error { return m } + +// CircuitBreakers_ThresholdsValidationError is the validation error returned +// by CircuitBreakers_Thresholds.Validate if the designated constraints aren't met. +type CircuitBreakers_ThresholdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakers_ThresholdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakers_ThresholdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CircuitBreakers_ThresholdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakers_ThresholdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CircuitBreakers_ThresholdsValidationError) ErrorName() string { + return "CircuitBreakers_ThresholdsValidationError" +} + +// Error satisfies the builtin error interface +func (e CircuitBreakers_ThresholdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers_Thresholds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakers_ThresholdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakers_ThresholdsValidationError{} + +// Validate checks the field values on CircuitBreakers_Thresholds_RetryBudget +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *CircuitBreakers_Thresholds_RetryBudget) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CircuitBreakers_Thresholds_RetryBudget with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// CircuitBreakers_Thresholds_RetryBudgetMultiError, or nil if none found. 
+func (m *CircuitBreakers_Thresholds_RetryBudget) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBudgetPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBudgetPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinRetryConcurrency()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinRetryConcurrency()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CircuitBreakers_Thresholds_RetryBudgetMultiError(errors) + } + + return nil +} + +// CircuitBreakers_Thresholds_RetryBudgetMultiError is an error wrapping +// multiple validation errors returned by +// CircuitBreakers_Thresholds_RetryBudget.ValidateAll() if the designated +// constraints aren't met. +type CircuitBreakers_Thresholds_RetryBudgetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) AllErrors() []error { return m } + +// CircuitBreakers_Thresholds_RetryBudgetValidationError is the validation +// error returned by CircuitBreakers_Thresholds_RetryBudget.Validate if the +// designated constraints aren't met. +type CircuitBreakers_Thresholds_RetryBudgetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) ErrorName() string { + return "CircuitBreakers_Thresholds_RetryBudgetValidationError" +} + +// Error satisfies the builtin error interface +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers_Thresholds_RetryBudget.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakers_Thresholds_RetryBudgetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakers_Thresholds_RetryBudgetValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go new file mode 100644 index 000000000..14ca0a1f1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go @@ -0,0 +1,337 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinRetryConcurrency != nil { + size, err := (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BudgetPercent != nil { + if vtmsg, ok := interface{}(m.BudgetPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BudgetPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryBudget != nil { + size, err := m.RetryBudget.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxConnectionPools != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionPools).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TrackRemaining { + i-- + if m.TrackRemaining { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.MaxRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if 
m.MaxPendingRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPendingRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConnections != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnections).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PerHostThresholds) > 0 { + for iNdEx := len(m.PerHostThresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PerHostThresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Thresholds) > 0 { + for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BudgetPercent != nil { + if size, ok := interface{}(m.BudgetPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BudgetPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinRetryConcurrency != nil { + l = (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers_Thresholds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.MaxConnections != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnections).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxPendingRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPendingRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRetries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackRemaining { + n += 2 + } + if m.MaxConnectionPools != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnectionPools).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryBudget 
!= nil { + l = m.RetryBudget.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Thresholds) > 0 { + for _, e := range m.Thresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.PerHostThresholds) > 0 { + for _, e := range m.PerHostThresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go new file mode 100644 index 000000000..53c9afecd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go @@ -0,0 +1,4635 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Refer to :ref:`service discovery type ` +// for an explanation on each type. +type Cluster_DiscoveryType int32 + +const ( + // Refer to the :ref:`static discovery type` + // for an explanation. + Cluster_STATIC Cluster_DiscoveryType = 0 + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + Cluster_STRICT_DNS Cluster_DiscoveryType = 1 + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + Cluster_LOGICAL_DNS Cluster_DiscoveryType = 2 + // Refer to the :ref:`service discovery type` + // for an explanation. + Cluster_EDS Cluster_DiscoveryType = 3 + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + Cluster_ORIGINAL_DST Cluster_DiscoveryType = 4 +) + +// Enum value maps for Cluster_DiscoveryType. 
+var ( + Cluster_DiscoveryType_name = map[int32]string{ + 0: "STATIC", + 1: "STRICT_DNS", + 2: "LOGICAL_DNS", + 3: "EDS", + 4: "ORIGINAL_DST", + } + Cluster_DiscoveryType_value = map[string]int32{ + "STATIC": 0, + "STRICT_DNS": 1, + "LOGICAL_DNS": 2, + "EDS": 3, + "ORIGINAL_DST": 4, + } +) + +func (x Cluster_DiscoveryType) Enum() *Cluster_DiscoveryType { + p := new(Cluster_DiscoveryType) + *p = x + return p +} + +func (x Cluster_DiscoveryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_DiscoveryType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[0].Descriptor() +} + +func (Cluster_DiscoveryType) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[0] +} + +func (x Cluster_DiscoveryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_DiscoveryType.Descriptor instead. +func (Cluster_DiscoveryType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 0} +} + +// Refer to :ref:`load balancer type ` architecture +// overview section for information on each type. +type Cluster_LbPolicy int32 + +const ( + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + Cluster_ROUND_ROBIN Cluster_LbPolicy = 0 + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + Cluster_LEAST_REQUEST Cluster_LbPolicy = 1 + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + Cluster_RING_HASH Cluster_LbPolicy = 2 + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + Cluster_RANDOM Cluster_LbPolicy = 3 + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + Cluster_MAGLEV Cluster_LbPolicy = 5 + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + Cluster_CLUSTER_PROVIDED Cluster_LbPolicy = 6 + // Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // This has been deprecated in favor of using the :ref:`load_balancing_policy + // ` field without + // setting any value in :ref:`lb_policy`. + Cluster_LOAD_BALANCING_POLICY_CONFIG Cluster_LbPolicy = 7 +) + +// Enum value maps for Cluster_LbPolicy. 
+var ( + Cluster_LbPolicy_name = map[int32]string{ + 0: "ROUND_ROBIN", + 1: "LEAST_REQUEST", + 2: "RING_HASH", + 3: "RANDOM", + 5: "MAGLEV", + 6: "CLUSTER_PROVIDED", + 7: "LOAD_BALANCING_POLICY_CONFIG", + } + Cluster_LbPolicy_value = map[string]int32{ + "ROUND_ROBIN": 0, + "LEAST_REQUEST": 1, + "RING_HASH": 2, + "RANDOM": 3, + "MAGLEV": 5, + "CLUSTER_PROVIDED": 6, + "LOAD_BALANCING_POLICY_CONFIG": 7, + } +) + +func (x Cluster_LbPolicy) Enum() *Cluster_LbPolicy { + p := new(Cluster_LbPolicy) + *p = x + return p +} + +func (x Cluster_LbPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[1].Descriptor() +} + +func (Cluster_LbPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[1] +} + +func (x Cluster_LbPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbPolicy.Descriptor instead. +func (Cluster_LbPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 1} +} + +// When V4_ONLY is selected, the DNS resolver will only perform a lookup for +// addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will +// only perform a lookup for addresses in the IPv6 family. If AUTO is +// specified, the DNS resolver will first perform a lookup for addresses in +// the IPv6 family and fallback to a lookup for addresses in the IPv4 family. +// This is semantically equivalent to a non-existent V6_PREFERRED option. +// AUTO is a legacy name that is more opaque than +// necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. +// If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the +// IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback +// target will only get v6 addresses if there were NO v4 addresses to return. +// If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, +// and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for +// upstream connections. Refer to :ref:`Happy Eyeballs Support ` +// for more information. +// For cluster types other than +// :ref:`STRICT_DNS` and +// :ref:`LOGICAL_DNS`, +// this setting is +// ignored. +// [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] +type Cluster_DnsLookupFamily int32 + +const ( + Cluster_AUTO Cluster_DnsLookupFamily = 0 + Cluster_V4_ONLY Cluster_DnsLookupFamily = 1 + Cluster_V6_ONLY Cluster_DnsLookupFamily = 2 + Cluster_V4_PREFERRED Cluster_DnsLookupFamily = 3 + Cluster_ALL Cluster_DnsLookupFamily = 4 +) + +// Enum value maps for Cluster_DnsLookupFamily. 
+var ( + Cluster_DnsLookupFamily_name = map[int32]string{ + 0: "AUTO", + 1: "V4_ONLY", + 2: "V6_ONLY", + 3: "V4_PREFERRED", + 4: "ALL", + } + Cluster_DnsLookupFamily_value = map[string]int32{ + "AUTO": 0, + "V4_ONLY": 1, + "V6_ONLY": 2, + "V4_PREFERRED": 3, + "ALL": 4, + } +) + +func (x Cluster_DnsLookupFamily) Enum() *Cluster_DnsLookupFamily { + p := new(Cluster_DnsLookupFamily) + *p = x + return p +} + +func (x Cluster_DnsLookupFamily) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_DnsLookupFamily) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[2].Descriptor() +} + +func (Cluster_DnsLookupFamily) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[2] +} + +func (x Cluster_DnsLookupFamily) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_DnsLookupFamily.Descriptor instead. +func (Cluster_DnsLookupFamily) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 2} +} + +type Cluster_ClusterProtocolSelection int32 + +const ( + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + Cluster_USE_CONFIGURED_PROTOCOL Cluster_ClusterProtocolSelection = 0 + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + Cluster_USE_DOWNSTREAM_PROTOCOL Cluster_ClusterProtocolSelection = 1 +) + +// Enum value maps for Cluster_ClusterProtocolSelection. +var ( + Cluster_ClusterProtocolSelection_name = map[int32]string{ + 0: "USE_CONFIGURED_PROTOCOL", + 1: "USE_DOWNSTREAM_PROTOCOL", + } + Cluster_ClusterProtocolSelection_value = map[string]int32{ + "USE_CONFIGURED_PROTOCOL": 0, + "USE_DOWNSTREAM_PROTOCOL": 1, + } +) + +func (x Cluster_ClusterProtocolSelection) Enum() *Cluster_ClusterProtocolSelection { + p := new(Cluster_ClusterProtocolSelection) + *p = x + return p +} + +func (x Cluster_ClusterProtocolSelection) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_ClusterProtocolSelection) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[3].Descriptor() +} + +func (Cluster_ClusterProtocolSelection) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[3] +} + +func (x Cluster_ClusterProtocolSelection) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_ClusterProtocolSelection.Descriptor instead. +func (Cluster_ClusterProtocolSelection) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3} +} + +// If NO_FALLBACK is selected, a result +// equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, +// any cluster endpoint may be returned (subject to policy, health checks, +// etc). If DEFAULT_SUBSET is selected, load balancing is performed over the +// endpoints matching the values from the default_subset field. 
+type Cluster_LbSubsetConfig_LbSubsetFallbackPolicy int32 + +const ( + Cluster_LbSubsetConfig_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 0 + Cluster_LbSubsetConfig_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 1 + Cluster_LbSubsetConfig_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 2 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetFallbackPolicy. +var ( + Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name = map[int32]string{ + 0: "NO_FALLBACK", + 1: "ANY_ENDPOINT", + 2: "DEFAULT_SUBSET", + } + Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_value = map[string]int32{ + "NO_FALLBACK": 0, + "ANY_ENDPOINT": 1, + "DEFAULT_SUBSET": 2, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[4].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[4] +} + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0} +} + +type Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy int32 + +const ( + // No fallback. Route metadata will be used as-is. + Cluster_LbSubsetConfig_METADATA_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 0 + // A special metadata key “fallback_list“ will be used to provide variants of metadata to try. + // Value of “fallback_list“ key has to be a list. Every list element has to be a struct - it will + // be merged with route metadata, overriding keys that appear in both places. + // “fallback_list“ entries will be used in order until a host is found. + // + // “fallback_list“ key itself is removed from metadata before subset load balancing is performed. + // + // Example: + // + // for metadata: + // + // .. code-block:: yaml + // + // version: 1.0 + // fallback_list: + // - version: 2.0 + // hardware: c64 + // - hardware: c32 + // - version: 3.0 + // + // at first, metadata: + // + // .. code-block:: json + // + // {"version": "2.0", "hardware": "c64"} + // + // will be used for load balancing. If no host is found, metadata: + // + // .. code-block:: json + // + // {"version": "1.0", "hardware": "c32"} + // + // is next to try. If it still results in no host, finally metadata: + // + // .. code-block:: json + // + // {"version": "3.0"} + // + // is used. + Cluster_LbSubsetConfig_FALLBACK_LIST Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 1 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy. 
+var ( + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name = map[int32]string{ + 0: "METADATA_NO_FALLBACK", + 1: "FALLBACK_LIST", + } + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_value = map[string]int32{ + "METADATA_NO_FALLBACK": 0, + "FALLBACK_LIST": 1, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[5].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[5] +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 1} +} + +// Allows to override top level fallback policy per selector. +type Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy int32 + +const ( + // If NOT_DEFINED top level config fallback policy is used instead. + Cluster_LbSubsetConfig_LbSubsetSelector_NOT_DEFINED Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 0 + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + Cluster_LbSubsetConfig_LbSubsetSelector_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 1 + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + Cluster_LbSubsetConfig_LbSubsetSelector_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 2 + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + Cluster_LbSubsetConfig_LbSubsetSelector_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 3 + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + Cluster_LbSubsetConfig_LbSubsetSelector_KEYS_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 4 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy. 
+var ( + Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_name = map[int32]string{ + 0: "NOT_DEFINED", + 1: "NO_FALLBACK", + 2: "ANY_ENDPOINT", + 3: "DEFAULT_SUBSET", + 4: "KEYS_SUBSET", + } + Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_value = map[string]int32{ + "NOT_DEFINED": 0, + "NO_FALLBACK": 1, + "ANY_ENDPOINT": 2, + "DEFAULT_SUBSET": 3, + "KEYS_SUBSET": 4, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[6].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[6] +} + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0, 0} +} + +// The hash function used to hash hosts onto the ketama ring. +type Cluster_RingHashLbConfig_HashFunction int32 + +const ( + // Use `xxHash `_, this is the default hash function. + Cluster_RingHashLbConfig_XX_HASH Cluster_RingHashLbConfig_HashFunction = 0 + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + Cluster_RingHashLbConfig_MURMUR_HASH_2 Cluster_RingHashLbConfig_HashFunction = 1 +) + +// Enum value maps for Cluster_RingHashLbConfig_HashFunction. +var ( + Cluster_RingHashLbConfig_HashFunction_name = map[int32]string{ + 0: "XX_HASH", + 1: "MURMUR_HASH_2", + } + Cluster_RingHashLbConfig_HashFunction_value = map[string]int32{ + "XX_HASH": 0, + "MURMUR_HASH_2": 1, + } +) + +func (x Cluster_RingHashLbConfig_HashFunction) Enum() *Cluster_RingHashLbConfig_HashFunction { + p := new(Cluster_RingHashLbConfig_HashFunction) + *p = x + return p +} + +func (x Cluster_RingHashLbConfig_HashFunction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_RingHashLbConfig_HashFunction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[7].Descriptor() +} + +func (Cluster_RingHashLbConfig_HashFunction) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[7] +} + +func (x Cluster_RingHashLbConfig_HashFunction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_RingHashLbConfig_HashFunction.Descriptor instead. 
+func (Cluster_RingHashLbConfig_HashFunction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7, 0} +} + +type UpstreamConnectionOptions_FirstAddressFamilyVersion int32 + +const ( + // respect the native ranking of destination ip addresses returned from dns + // resolution + UpstreamConnectionOptions_DEFAULT UpstreamConnectionOptions_FirstAddressFamilyVersion = 0 + UpstreamConnectionOptions_V4 UpstreamConnectionOptions_FirstAddressFamilyVersion = 1 + UpstreamConnectionOptions_V6 UpstreamConnectionOptions_FirstAddressFamilyVersion = 2 +) + +// Enum value maps for UpstreamConnectionOptions_FirstAddressFamilyVersion. +var ( + UpstreamConnectionOptions_FirstAddressFamilyVersion_name = map[int32]string{ + 0: "DEFAULT", + 1: "V4", + 2: "V6", + } + UpstreamConnectionOptions_FirstAddressFamilyVersion_value = map[string]int32{ + "DEFAULT": 0, + "V4": 1, + "V6": 2, + } +) + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Enum() *UpstreamConnectionOptions_FirstAddressFamilyVersion { + p := new(UpstreamConnectionOptions_FirstAddressFamilyVersion) + *p = x + return p +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[8].Descriptor() +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[8] +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UpstreamConnectionOptions_FirstAddressFamilyVersion.Descriptor instead. +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + +// Cluster list collections. Entries are “Cluster“ resources or references. +// [#not-implemented-hide:] +type ClusterCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries *v3.CollectionEntry `protobuf:"bytes,1,opt,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *ClusterCollection) Reset() { + *x = ClusterCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterCollection) ProtoMessage() {} + +func (x *ClusterCollection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterCollection.ProtoReflect.Descriptor instead. +func (*ClusterCollection) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterCollection) GetEntries() *v3.CollectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +// Configuration for a single upstream cluster. 
+// [#next-free-field: 58] +type Cluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration to use different transport sockets for different endpoints. The entry of + // “envoy.transport_socket_match“ in the :ref:`LbEndpoint.Metadata + // ` is used to match against the + // transport sockets as they appear in the list. If a match is not found, the search continues in + // :ref:`LocalityLbEndpoints.Metadata + // `. The first :ref:`match + // ` is used. For example, with + // the following match + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: envoy.transport_sockets.raw_buffer + // + // Connections to the endpoints whose metadata value under “envoy.transport_socket_match“ + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under “envoy.transport_socket_match“ does not match any + // “TransportSocketMatch“, the locality metadata is then checked for a match. Barring any + // matches in the endpoint or locality metadata, the socket configuration fallbacks to use the + // “tls_context“ or “transport_socket“ specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // “TransportSocketMatch“ in this field. Other client Envoys receive CDS without + // “transport_socket_match“ set, and still send plain text traffic to the same cluster. + // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // + // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + TransportSocketMatches []*Cluster_TransportSocketMatch `protobuf:"bytes,43,rep,name=transport_socket_matches,json=transportSocketMatches,proto3" json:"transport_socket_matches,omitempty"` + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any “:“ in the cluster name will be converted to “_“ when emitting statistics. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // An optional alternative to the cluster name to be used for observability. This name is used + // emitting stats for the cluster and access logging the cluster name. 
This will appear as + // additional information in configuration dumps of a cluster's current status as + // :ref:`observability_name ` + // and as an additional tag "upstream_cluster.name" while tracing. Note: Any “:“ in the name + // will be converted to “_“ when emitting statistics. This should not be confused with + // :ref:`Router Filter Header `. + AltStatName string `protobuf:"bytes,28,opt,name=alt_stat_name,json=altStatName,proto3" json:"alt_stat_name,omitempty"` + // Types that are assignable to ClusterDiscoveryType: + // + // *Cluster_Type + // *Cluster_ClusterType + ClusterDiscoveryType isCluster_ClusterDiscoveryType `protobuf_oneof:"cluster_discovery_type"` + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig *Cluster_EdsClusterConfig `protobuf:"bytes,3,opt,name=eds_cluster_config,json=edsClusterConfig,proto3" json:"eds_cluster_config,omitempty"` + // The timeout for new network connections to hosts in the cluster. + // If not set, a default value of 5s will be used. + ConnectTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy Cluster_LbPolicy `protobuf:"varint,6,opt,name=lb_policy,json=lbPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbPolicy" json:"lb_policy,omitempty"` + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes the “hosts“ field in the v2 API. + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + LoadAssignment *v31.ClusterLoadAssignment `protobuf:"bytes,33,opt,name=load_assignment,json=loadAssignment,proto3" json:"load_assignment,omitempty"` + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + HealthChecks []*v32.HealthCheck `protobuf:"bytes,8,rep,name=health_checks,json=healthChecks,proto3" json:"health_checks,omitempty"` + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + // + // .. attention:: + // + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` + // Optional :ref:`circuit breaking ` for the cluster. 
+ CircuitBreakers *CircuitBreakers `protobuf:"bytes,10,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + // This has been deprecated in favor of + // :ref:`upstream_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // upstream_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + UpstreamHttpProtocolOptions *v32.UpstreamHttpProtocolOptions `protobuf:"bytes,46,opt,name=upstream_http_protocol_options,json=upstreamHttpProtocolOptions,proto3" json:"upstream_http_protocol_options,omitempty"` + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. + // This has been deprecated in favor of + // :ref:`common_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // common_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + CommonHttpProtocolOptions *v32.HttpProtocolOptions `protobuf:"bytes,29,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"` + // Additional options when handling HTTP1 requests. + // This has been deprecated in favor of http_protocol_options fields in the + // :ref:`http_protocol_options ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + HttpProtocolOptions *v32.Http1ProtocolOptions `protobuf:"bytes,13,opt,name=http_protocol_options,json=httpProtocolOptions,proto3" json:"http_protocol_options,omitempty"` + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, “http2_protocol_options“ must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + // This has been deprecated in favor of http2_protocol_options fields in the + // :ref:`http_protocol_options ` + // message. http2_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + Http2ProtocolOptions *v32.Http2ProtocolOptions `protobuf:"bytes,14,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. 
+ // [#next-major-version: make this a list of typed extensions.] + TypedExtensionProtocolOptions map[string]*anypb.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. The value configured must be at least 1ms. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"` + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. + DnsFailureRefreshRate *Cluster_RefreshRate `protobuf:"bytes,44,opt,name=dns_failure_refresh_rate,json=dnsFailureRefreshRate,proto3" json:"dns_failure_refresh_rate,omitempty"` + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + RespectDnsTtl bool `protobuf:"varint,39,opt,name=respect_dns_ttl,json=respectDnsTtl,proto3" json:"respect_dns_ttl,omitempty"` + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily Cluster_DnsLookupFamily `protobuf:"varint,17,opt,name=dns_lookup_family,json=dnsLookupFamily,proto3,enum=envoy.config.cluster.v3.Cluster_DnsLookupFamily" json:"dns_lookup_family,omitempty"` + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + DnsResolvers []*v32.Address `protobuf:"bytes,18,rep,name=dns_resolvers,json=dnsResolvers,proto3" json:"dns_resolvers,omitempty"` + // Always use TCP queries instead of UDP queries for DNS lookups. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + UseTcpForDnsLookups bool `protobuf:"varint,45,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // DNS resolution configuration which includes the underlying dns resolver addresses and options. 
+ // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + DnsResolutionConfig *v32.DnsResolutionConfig `protobuf:"bytes,53,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"` + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this “typed_dns_resolver_config“. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. + // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists, + // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“. + // When “typed_dns_resolver_config“ is missing, the default behavior is in place. + // [#extension-category: envoy.network.dns_resolver] + TypedDnsResolverConfig *v32.TypedExtensionConfig `protobuf:"bytes,55,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // or :ref:`Redis Cluster`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + WaitForWarmOnInit *wrapperspb.BoolValue `protobuf:"bytes,54,opt,name=wait_for_warm_on_init,json=waitForWarmOnInit,proto3" json:"wait_for_warm_on_init,omitempty"` + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + OutlierDetection *OutlierDetection `protobuf:"bytes,19,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + CleanupInterval *durationpb.Duration `protobuf:"bytes,20,opt,name=cleanup_interval,json=cleanupInterval,proto3" json:"cleanup_interval,omitempty"` + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + UpstreamBindConfig *v32.BindConfig `protobuf:"bytes,21,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"` + // Configuration for load balancing subsetting. 
+ LbSubsetConfig *Cluster_LbSubsetConfig `protobuf:"bytes,22,opt,name=lb_subset_config,json=lbSubsetConfig,proto3" json:"lb_subset_config,omitempty"` + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH`, + // :ref:`MAGLEV` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + // + // Types that are assignable to LbConfig: + // + // *Cluster_RingHashLbConfig_ + // *Cluster_MaglevLbConfig_ + // *Cluster_OriginalDstLbConfig_ + // *Cluster_LeastRequestLbConfig_ + // *Cluster_RoundRobinLbConfig_ + LbConfig isCluster_LbConfig `protobuf_oneof:"lb_config"` + // Common configuration for all load balancer implementations. + CommonLbConfig *Cluster_CommonLbConfig `protobuf:"bytes,27,opt,name=common_lb_config,json=commonLbConfig,proto3" json:"common_lb_config,omitempty"` + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name “envoy.transport_sockets.tls“ and + // :ref:`UpstreamTlsContexts ` in the “typed_config“. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + TransportSocket *v32.TransportSocket `protobuf:"bytes,24,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as “envoy.filters.http.router“. + Metadata *v32.Metadata `protobuf:"bytes,25,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Determines how Envoy selects the protocol used to speak to upstream hosts. + // This has been deprecated in favor of setting explicit protocol selection + // in the :ref:`http_protocol_options + // ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + ProtocolSelection Cluster_ClusterProtocolSelection `protobuf:"varint,26,opt,name=protocol_selection,json=protocolSelection,proto3,enum=envoy.config.cluster.v3.Cluster_ClusterProtocolSelection" json:"protocol_selection,omitempty"` + // Optional options for upstream connections. + UpstreamConnectionOptions *UpstreamConnectionOptions `protobuf:"bytes,30,opt,name=upstream_connection_options,json=upstreamConnectionOptions,proto3" json:"upstream_connection_options,omitempty"` + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. + // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. 
+ CloseConnectionsOnHostHealthFailure bool `protobuf:"varint,31,opt,name=close_connections_on_host_health_failure,json=closeConnectionsOnHostHealthFailure,proto3" json:"close_connections_on_host_health_failure,omitempty"` + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + IgnoreHealthOnHostRemoval bool `protobuf:"varint,32,opt,name=ignore_health_on_host_removal,json=ignoreHealthOnHostRemoval,proto3" json:"ignore_health_on_host_removal,omitempty"` + // An (optional) network filter chain, listed in the order the filters should be applied. + // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + Filters []*Filter `protobuf:"bytes,40,rep,name=filters,proto3" json:"filters,omitempty"` + // If this field is set and is supported by the client, it will supersede the value of + // :ref:`lb_policy`. + LoadBalancingPolicy *LoadBalancingPolicy `protobuf:"bytes,41,opt,name=load_balancing_policy,json=loadBalancingPolicy,proto3" json:"load_balancing_policy,omitempty"` + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + LrsServer *v32.ConfigSource `protobuf:"bytes,42,opt,name=lrs_server,json=lrsServer,proto3" json:"lrs_server,omitempty"` + // [#not-implemented-hide:] + // A list of metric names from ORCA load reports to propagate to LRS. + // + // For map fields in the ORCA proto, the string will be of the form “.“. + // For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA + // “named_metrics“ field. + // + // The special map key “*“ means to report all entries in the map (e.g., “named_metrics.*“ means to + // report all entries in the ORCA named_metrics field). Note that this should be used only with trusted + // backends. + // + // The metric names in LRS will follow the same semantics as this field. In other words, if this field + // contains “named_metrics.foo“, then the LRS load report will include the data with that same string + // as the key. + LrsReportEndpointMetrics []string `protobuf:"bytes,57,rep,name=lrs_report_endpoint_metrics,json=lrsReportEndpointMetrics,proto3" json:"lrs_report_endpoint_metrics,omitempty"` + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. 
A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + // + // .. attention:: + // + // This field has been deprecated in favor of ``timeout_budgets``, part of + // :ref:`track_cluster_stats `. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + TrackTimeoutBudgets bool `protobuf:"varint,47,opt,name=track_timeout_budgets,json=trackTimeoutBudgets,proto3" json:"track_timeout_budgets,omitempty"` + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from “http2_protocol_options“ + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + // [#extension-category: envoy.upstreams] + UpstreamConfig *v32.TypedExtensionConfig `protobuf:"bytes,48,opt,name=upstream_config,json=upstreamConfig,proto3" json:"upstream_config,omitempty"` + // Configuration to track optional cluster stats. + TrackClusterStats *TrackClusterStats `protobuf:"bytes,49,opt,name=track_cluster_stats,json=trackClusterStats,proto3" json:"track_cluster_stats,omitempty"` + // Preconnect configuration for this cluster. + PreconnectPolicy *Cluster_PreconnectPolicy `protobuf:"bytes,50,opt,name=preconnect_policy,json=preconnectPolicy,proto3" json:"preconnect_policy,omitempty"` + // If “connection_pool_per_downstream_connection“ is true, the cluster will use a separate + // connection pool for every downstream connection + ConnectionPoolPerDownstreamConnection bool `protobuf:"varint,51,opt,name=connection_pool_per_downstream_connection,json=connectionPoolPerDownstreamConnection,proto3" json:"connection_pool_per_downstream_connection,omitempty"` +} + +func (x *Cluster) Reset() { + *x = Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster) ProtoMessage() {} + +func (x *Cluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster.ProtoReflect.Descriptor instead. 
+func (*Cluster) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1} +} + +func (x *Cluster) GetTransportSocketMatches() []*Cluster_TransportSocketMatch { + if x != nil { + return x.TransportSocketMatches + } + return nil +} + +func (x *Cluster) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster) GetAltStatName() string { + if x != nil { + return x.AltStatName + } + return "" +} + +func (m *Cluster) GetClusterDiscoveryType() isCluster_ClusterDiscoveryType { + if m != nil { + return m.ClusterDiscoveryType + } + return nil +} + +func (x *Cluster) GetType() Cluster_DiscoveryType { + if x, ok := x.GetClusterDiscoveryType().(*Cluster_Type); ok { + return x.Type + } + return Cluster_STATIC +} + +func (x *Cluster) GetClusterType() *Cluster_CustomClusterType { + if x, ok := x.GetClusterDiscoveryType().(*Cluster_ClusterType); ok { + return x.ClusterType + } + return nil +} + +func (x *Cluster) GetEdsClusterConfig() *Cluster_EdsClusterConfig { + if x != nil { + return x.EdsClusterConfig + } + return nil +} + +func (x *Cluster) GetConnectTimeout() *durationpb.Duration { + if x != nil { + return x.ConnectTimeout + } + return nil +} + +func (x *Cluster) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerConnectionBufferLimitBytes + } + return nil +} + +func (x *Cluster) GetLbPolicy() Cluster_LbPolicy { + if x != nil { + return x.LbPolicy + } + return Cluster_ROUND_ROBIN +} + +func (x *Cluster) GetLoadAssignment() *v31.ClusterLoadAssignment { + if x != nil { + return x.LoadAssignment + } + return nil +} + +func (x *Cluster) GetHealthChecks() []*v32.HealthCheck { + if x != nil { + return x.HealthChecks + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestsPerConnection + } + return nil +} + +func (x *Cluster) GetCircuitBreakers() *CircuitBreakers { + if x != nil { + return x.CircuitBreakers + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetUpstreamHttpProtocolOptions() *v32.UpstreamHttpProtocolOptions { + if x != nil { + return x.UpstreamHttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetCommonHttpProtocolOptions() *v32.HttpProtocolOptions { + if x != nil { + return x.CommonHttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetHttpProtocolOptions() *v32.Http1ProtocolOptions { + if x != nil { + return x.HttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. 
+func (x *Cluster) GetHttp2ProtocolOptions() *v32.Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*anypb.Any { + if x != nil { + return x.TypedExtensionProtocolOptions + } + return nil +} + +func (x *Cluster) GetDnsRefreshRate() *durationpb.Duration { + if x != nil { + return x.DnsRefreshRate + } + return nil +} + +func (x *Cluster) GetDnsFailureRefreshRate() *Cluster_RefreshRate { + if x != nil { + return x.DnsFailureRefreshRate + } + return nil +} + +func (x *Cluster) GetRespectDnsTtl() bool { + if x != nil { + return x.RespectDnsTtl + } + return false +} + +func (x *Cluster) GetDnsLookupFamily() Cluster_DnsLookupFamily { + if x != nil { + return x.DnsLookupFamily + } + return Cluster_AUTO +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetDnsResolvers() []*v32.Address { + if x != nil { + return x.DnsResolvers + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetDnsResolutionConfig() *v32.DnsResolutionConfig { + if x != nil { + return x.DnsResolutionConfig + } + return nil +} + +func (x *Cluster) GetTypedDnsResolverConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.TypedDnsResolverConfig + } + return nil +} + +func (x *Cluster) GetWaitForWarmOnInit() *wrapperspb.BoolValue { + if x != nil { + return x.WaitForWarmOnInit + } + return nil +} + +func (x *Cluster) GetOutlierDetection() *OutlierDetection { + if x != nil { + return x.OutlierDetection + } + return nil +} + +func (x *Cluster) GetCleanupInterval() *durationpb.Duration { + if x != nil { + return x.CleanupInterval + } + return nil +} + +func (x *Cluster) GetUpstreamBindConfig() *v32.BindConfig { + if x != nil { + return x.UpstreamBindConfig + } + return nil +} + +func (x *Cluster) GetLbSubsetConfig() *Cluster_LbSubsetConfig { + if x != nil { + return x.LbSubsetConfig + } + return nil +} + +func (m *Cluster) GetLbConfig() isCluster_LbConfig { + if m != nil { + return m.LbConfig + } + return nil +} + +func (x *Cluster) GetRingHashLbConfig() *Cluster_RingHashLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_RingHashLbConfig_); ok { + return x.RingHashLbConfig + } + return nil +} + +func (x *Cluster) GetMaglevLbConfig() *Cluster_MaglevLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_MaglevLbConfig_); ok { + return x.MaglevLbConfig + } + return nil +} + +func (x *Cluster) GetOriginalDstLbConfig() *Cluster_OriginalDstLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_OriginalDstLbConfig_); ok { + return x.OriginalDstLbConfig + } + return nil +} + +func (x *Cluster) GetLeastRequestLbConfig() *Cluster_LeastRequestLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_LeastRequestLbConfig_); ok { + return x.LeastRequestLbConfig + } + return nil +} + +func (x *Cluster) GetRoundRobinLbConfig() *Cluster_RoundRobinLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_RoundRobinLbConfig_); ok { + return x.RoundRobinLbConfig + } + return nil +} + +func (x *Cluster) GetCommonLbConfig() *Cluster_CommonLbConfig { + if x != nil { + return x.CommonLbConfig + } + return nil +} + +func (x *Cluster) GetTransportSocket() *v32.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +func (x 
*Cluster) GetMetadata() *v32.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetProtocolSelection() Cluster_ClusterProtocolSelection { + if x != nil { + return x.ProtocolSelection + } + return Cluster_USE_CONFIGURED_PROTOCOL +} + +func (x *Cluster) GetUpstreamConnectionOptions() *UpstreamConnectionOptions { + if x != nil { + return x.UpstreamConnectionOptions + } + return nil +} + +func (x *Cluster) GetCloseConnectionsOnHostHealthFailure() bool { + if x != nil { + return x.CloseConnectionsOnHostHealthFailure + } + return false +} + +func (x *Cluster) GetIgnoreHealthOnHostRemoval() bool { + if x != nil { + return x.IgnoreHealthOnHostRemoval + } + return false +} + +func (x *Cluster) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +func (x *Cluster) GetLoadBalancingPolicy() *LoadBalancingPolicy { + if x != nil { + return x.LoadBalancingPolicy + } + return nil +} + +func (x *Cluster) GetLrsServer() *v32.ConfigSource { + if x != nil { + return x.LrsServer + } + return nil +} + +func (x *Cluster) GetLrsReportEndpointMetrics() []string { + if x != nil { + return x.LrsReportEndpointMetrics + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetTrackTimeoutBudgets() bool { + if x != nil { + return x.TrackTimeoutBudgets + } + return false +} + +func (x *Cluster) GetUpstreamConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.UpstreamConfig + } + return nil +} + +func (x *Cluster) GetTrackClusterStats() *TrackClusterStats { + if x != nil { + return x.TrackClusterStats + } + return nil +} + +func (x *Cluster) GetPreconnectPolicy() *Cluster_PreconnectPolicy { + if x != nil { + return x.PreconnectPolicy + } + return nil +} + +func (x *Cluster) GetConnectionPoolPerDownstreamConnection() bool { + if x != nil { + return x.ConnectionPoolPerDownstreamConnection + } + return false +} + +type isCluster_ClusterDiscoveryType interface { + isCluster_ClusterDiscoveryType() +} + +type Cluster_Type struct { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + Type Cluster_DiscoveryType `protobuf:"varint,2,opt,name=type,proto3,enum=envoy.config.cluster.v3.Cluster_DiscoveryType,oneof"` +} + +type Cluster_ClusterType struct { + // The custom cluster type. + ClusterType *Cluster_CustomClusterType `protobuf:"bytes,38,opt,name=cluster_type,json=clusterType,proto3,oneof"` +} + +func (*Cluster_Type) isCluster_ClusterDiscoveryType() {} + +func (*Cluster_ClusterType) isCluster_ClusterDiscoveryType() {} + +type isCluster_LbConfig interface { + isCluster_LbConfig() +} + +type Cluster_RingHashLbConfig_ struct { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig *Cluster_RingHashLbConfig `protobuf:"bytes,23,opt,name=ring_hash_lb_config,json=ringHashLbConfig,proto3,oneof"` +} + +type Cluster_MaglevLbConfig_ struct { + // Optional configuration for the Maglev load balancing policy. + MaglevLbConfig *Cluster_MaglevLbConfig `protobuf:"bytes,52,opt,name=maglev_lb_config,json=maglevLbConfig,proto3,oneof"` +} + +type Cluster_OriginalDstLbConfig_ struct { + // Optional configuration for the Original Destination load balancing policy. 
+ OriginalDstLbConfig *Cluster_OriginalDstLbConfig `protobuf:"bytes,34,opt,name=original_dst_lb_config,json=originalDstLbConfig,proto3,oneof"` +} + +type Cluster_LeastRequestLbConfig_ struct { + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig *Cluster_LeastRequestLbConfig `protobuf:"bytes,37,opt,name=least_request_lb_config,json=leastRequestLbConfig,proto3,oneof"` +} + +type Cluster_RoundRobinLbConfig_ struct { + // Optional configuration for the RoundRobin load balancing policy. + RoundRobinLbConfig *Cluster_RoundRobinLbConfig `protobuf:"bytes,56,opt,name=round_robin_lb_config,json=roundRobinLbConfig,proto3,oneof"` +} + +func (*Cluster_RingHashLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_MaglevLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_OriginalDstLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_LeastRequestLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_RoundRobinLbConfig_) isCluster_LbConfig() {} + +// Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. +// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +type LoadBalancingPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. 
+ Policies []*LoadBalancingPolicy_Policy `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *LoadBalancingPolicy) Reset() { + *x = LoadBalancingPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalancingPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalancingPolicy) ProtoMessage() {} + +func (x *LoadBalancingPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalancingPolicy.ProtoReflect.Descriptor instead. +func (*LoadBalancingPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{2} +} + +func (x *LoadBalancingPolicy) GetPolicies() []*LoadBalancingPolicy_Policy { + if x != nil { + return x.Policies + } + return nil +} + +type UpstreamConnectionOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + TcpKeepalive *v32.TcpKeepalive `protobuf:"bytes,1,opt,name=tcp_keepalive,json=tcpKeepalive,proto3" json:"tcp_keepalive,omitempty"` + // If enabled, associates the interface name of the local address with the upstream connection. + // This can be used by extensions during processing of requests. The association mechanism is + // implementation specific. Defaults to false due to performance concerns. + SetLocalInterfaceNameOnUpstreamConnections bool `protobuf:"varint,2,opt,name=set_local_interface_name_on_upstream_connections,json=setLocalInterfaceNameOnUpstreamConnections,proto3" json:"set_local_interface_name_on_upstream_connections,omitempty"` + // Configurations for happy eyeballs algorithm. + // Add configs for first_address_family_version and first_address_family_count + // when sorting destination ip addresses. + HappyEyeballsConfig *UpstreamConnectionOptions_HappyEyeballsConfig `protobuf:"bytes,3,opt,name=happy_eyeballs_config,json=happyEyeballsConfig,proto3" json:"happy_eyeballs_config,omitempty"` +} + +func (x *UpstreamConnectionOptions) Reset() { + *x = UpstreamConnectionOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConnectionOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConnectionOptions) ProtoMessage() {} + +func (x *UpstreamConnectionOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConnectionOptions.ProtoReflect.Descriptor instead. 
+func (*UpstreamConnectionOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3} +} + +func (x *UpstreamConnectionOptions) GetTcpKeepalive() *v32.TcpKeepalive { + if x != nil { + return x.TcpKeepalive + } + return nil +} + +func (x *UpstreamConnectionOptions) GetSetLocalInterfaceNameOnUpstreamConnections() bool { + if x != nil { + return x.SetLocalInterfaceNameOnUpstreamConnections + } + return false +} + +func (x *UpstreamConnectionOptions) GetHappyEyeballsConfig() *UpstreamConnectionOptions_HappyEyeballsConfig { + if x != nil { + return x.HappyEyeballsConfig + } + return nil +} + +type TrackClusterStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + TimeoutBudgets bool `protobuf:"varint,1,opt,name=timeout_budgets,json=timeoutBudgets,proto3" json:"timeout_budgets,omitempty"` + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + RequestResponseSizes bool `protobuf:"varint,2,opt,name=request_response_sizes,json=requestResponseSizes,proto3" json:"request_response_sizes,omitempty"` + // If true, some stats will be emitted per-endpoint, similar to the stats in admin “/clusters“ + // output. + // + // This does not currently output correct stats during a hot-restart. + // + // This is not currently implemented by all stat sinks. + // + // These stats do not honor filtering or tag extraction rules in :ref:`StatsConfig + // ` (but fixed-value tags are supported). Admin + // endpoint filtering is supported. + // + // This may not be used at the same time as + // :ref:`load_stats_config `. + PerEndpointStats bool `protobuf:"varint,3,opt,name=per_endpoint_stats,json=perEndpointStats,proto3" json:"per_endpoint_stats,omitempty"` +} + +func (x *TrackClusterStats) Reset() { + *x = TrackClusterStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TrackClusterStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TrackClusterStats) ProtoMessage() {} + +func (x *TrackClusterStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TrackClusterStats.ProtoReflect.Descriptor instead. 
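(Editor's aside, not part of the generated file or this diff.) Both messages above are plain option bags. A hedged sketch of populating them from application code, assuming the usual go-control-plane aliases `clusterv3` for this package and `corev3` for envoy/config/core/v3, plus `wrapperspb` from google.golang.org/protobuf/types/known/wrapperspb; the keepalive values are illustrative only:

// Assumed imports: clusterv3, corev3, wrapperspb (see lead-in above).
stats := &clusterv3.TrackClusterStats{
	TimeoutBudgets:       true, // publish per-try and global timeout budget histograms
	RequestResponseSizes: true, // publish header/body size histograms
}
connOpts := &clusterv3.UpstreamConnectionOptions{
	TcpKeepalive: &corev3.TcpKeepalive{
		KeepaliveProbes:   wrapperspb.UInt32(3),
		KeepaliveTime:     wrapperspb.UInt32(300), // seconds before the first probe
		KeepaliveInterval: wrapperspb.UInt32(30),  // seconds between probes
	},
}
_, _ = stats, connOpts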
+func (*TrackClusterStats) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{4} +} + +func (x *TrackClusterStats) GetTimeoutBudgets() bool { + if x != nil { + return x.TimeoutBudgets + } + return false +} + +func (x *TrackClusterStats) GetRequestResponseSizes() bool { + if x != nil { + return x.RequestResponseSizes + } + return false +} + +func (x *TrackClusterStats) GetPerEndpointStats() bool { + if x != nil { + return x.PerEndpointStats + } + return false +} + +// TransportSocketMatch specifies what transport socket config will be used +// when the match conditions are satisfied. +type Cluster_TransportSocketMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the match, used in stats generation. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in “envoy.transport_socket_match“ is used to match + // against the values specified in this field. + Match *structpb.Struct `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` + // The configuration of the transport socket. + // [#extension-category: envoy.transport_sockets.upstream] + TransportSocket *v32.TransportSocket `protobuf:"bytes,3,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` +} + +func (x *Cluster_TransportSocketMatch) Reset() { + *x = Cluster_TransportSocketMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_TransportSocketMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_TransportSocketMatch) ProtoMessage() {} + +func (x *Cluster_TransportSocketMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_TransportSocketMatch.ProtoReflect.Descriptor instead. +func (*Cluster_TransportSocketMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Cluster_TransportSocketMatch) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster_TransportSocketMatch) GetMatch() *structpb.Struct { + if x != nil { + return x.Match + } + return nil +} + +func (x *Cluster_TransportSocketMatch) GetTransportSocket() *v32.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +// Extended cluster type. +type Cluster_CustomClusterType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the cluster to instantiate. The name must match a supported cluster type. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. 
+ // [#extension-category: envoy.clusters] + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *Cluster_CustomClusterType) Reset() { + *x = Cluster_CustomClusterType{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CustomClusterType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CustomClusterType) ProtoMessage() {} + +func (x *Cluster_CustomClusterType) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CustomClusterType.ProtoReflect.Descriptor instead. +func (*Cluster_CustomClusterType) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Cluster_CustomClusterType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster_CustomClusterType) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// Only valid when discovery type is EDS. +type Cluster_EdsClusterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for the source of EDS updates for this Cluster. + EdsConfig *v32.ConfigSource `protobuf:"bytes,1,opt,name=eds_config,json=edsConfig,proto3" json:"eds_config,omitempty"` + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. This may be a xdstp:// URL. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Cluster_EdsClusterConfig) Reset() { + *x = Cluster_EdsClusterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_EdsClusterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_EdsClusterConfig) ProtoMessage() {} + +func (x *Cluster_EdsClusterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_EdsClusterConfig.ProtoReflect.Descriptor instead. +func (*Cluster_EdsClusterConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Cluster_EdsClusterConfig) GetEdsConfig() *v32.ConfigSource { + if x != nil { + return x.EdsConfig + } + return nil +} + +func (x *Cluster_EdsClusterConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Optionally divide the endpoints in this cluster into subsets defined by +// endpoint metadata and selected by route and weighted cluster metadata. 
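(Editor's aside, not part of the generated file or this diff.) For orientation, a minimal sketch of how `Cluster_EdsClusterConfig` is typically populated, with EDS updates delivered over ADS. It assumes the go-control-plane import paths shown below; the `service_name` value is invented for illustration:

package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	// EDS-type cluster: endpoints come from the management server via ADS,
	// presented under an alternative service name.
	eds := &clusterv3.Cluster_EdsClusterConfig{
		EdsConfig: &corev3.ConfigSource{
			ConfigSourceSpecifier: &corev3.ConfigSource_Ads{
				Ads: &corev3.AggregatedConfigSource{},
			},
		},
		ServiceName: "checkout.example.svc", // illustrative only
	}
	fmt.Println(eds.GetServiceName())
}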
+// [#next-free-field: 9] +type Cluster_LbSubsetConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + FallbackPolicy Cluster_LbSubsetConfig_LbSubsetFallbackPolicy `protobuf:"varint,1,opt,name=fallback_policy,json=fallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetFallbackPolicy" json:"fallback_policy,omitempty"` + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the “envoy.lb“ + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + DefaultSubset *structpb.Struct `protobuf:"bytes,2,opt,name=default_subset,json=defaultSubset,proto3" json:"default_subset,omitempty"` + // For each entry, LbEndpoint.Metadata's + // “envoy.lb“ namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + SubsetSelectors []*Cluster_LbSubsetConfig_LbSubsetSelector `protobuf:"bytes,3,rep,name=subset_selectors,json=subsetSelectors,proto3" json:"subset_selectors,omitempty"` + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + LocalityWeightAware bool `protobuf:"varint,4,opt,name=locality_weight_aware,json=localityWeightAware,proto3" json:"locality_weight_aware,omitempty"` + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + ScaleLocalityWeight bool `protobuf:"varint,5,opt,name=scale_locality_weight,json=scaleLocalityWeight,proto3" json:"scale_locality_weight,omitempty"` + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. 
+ PanicModeAny bool `protobuf:"varint,6,opt,name=panic_mode_any,json=panicModeAny,proto3" json:"panic_mode_any,omitempty"` + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + ListAsAny bool `protobuf:"varint,7,opt,name=list_as_any,json=listAsAny,proto3" json:"list_as_any,omitempty"` + // Fallback mechanism that allows to try different route metadata until a host is found. + // If load balancing process, including all its mechanisms (like + // :ref:`fallback_policy`) + // fails to select a host, this policy decides if and how the process is repeated using another metadata. + // + // The value defaults to + // :ref:`METADATA_NO_FALLBACK`. + MetadataFallbackPolicy Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy `protobuf:"varint,8,opt,name=metadata_fallback_policy,json=metadataFallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy" json:"metadata_fallback_policy,omitempty"` +} + +func (x *Cluster_LbSubsetConfig) Reset() { + *x = Cluster_LbSubsetConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LbSubsetConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LbSubsetConfig) ProtoMessage() {} + +func (x *Cluster_LbSubsetConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig.ProtoReflect.Descriptor instead. +func (*Cluster_LbSubsetConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Cluster_LbSubsetConfig) GetFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetFallbackPolicy { + if x != nil { + return x.FallbackPolicy + } + return Cluster_LbSubsetConfig_NO_FALLBACK +} + +func (x *Cluster_LbSubsetConfig) GetDefaultSubset() *structpb.Struct { + if x != nil { + return x.DefaultSubset + } + return nil +} + +func (x *Cluster_LbSubsetConfig) GetSubsetSelectors() []*Cluster_LbSubsetConfig_LbSubsetSelector { + if x != nil { + return x.SubsetSelectors + } + return nil +} + +func (x *Cluster_LbSubsetConfig) GetLocalityWeightAware() bool { + if x != nil { + return x.LocalityWeightAware + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetScaleLocalityWeight() bool { + if x != nil { + return x.ScaleLocalityWeight + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetPanicModeAny() bool { + if x != nil { + return x.PanicModeAny + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetListAsAny() bool { + if x != nil { + return x.ListAsAny + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetMetadataFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + if x != nil { + return x.MetadataFallbackPolicy + } + return Cluster_LbSubsetConfig_METADATA_NO_FALLBACK +} + +// Configuration for :ref:`slow start mode `. 
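(Editor's aside, not part of the generated file or this diff.) The `subset_selectors` JSON example quoted in the comments above maps directly onto these generated types. A hedged sketch, using the same `clusterv3` alias as before plus `structpb` from google.golang.org/protobuf/types/known/structpb; the metadata keys are invented and the `DEFAULT_SUBSET` enum value is taken from the proto documentation rather than this excerpt:

// Assumed imports: clusterv3, structpb (see lead-in above).
defaultSubset, err := structpb.NewStruct(map[string]interface{}{
	"version": "v1", // illustrative default subset under the envoy.lb namespace
})
if err != nil {
	panic(err)
}
subsetCfg := &clusterv3.Cluster_LbSubsetConfig{
	FallbackPolicy: clusterv3.Cluster_LbSubsetConfig_DEFAULT_SUBSET,
	DefaultSubset:  defaultSubset,
	SubsetSelectors: []*clusterv3.Cluster_LbSubsetConfig_LbSubsetSelector{
		{Keys: []string{"version"}},
		{Keys: []string{"stage", "hardware_type"}},
	},
}
_ = subsetCfg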
+type Cluster_SlowStartConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // “new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))“, + // where “time_factor=(time_since_start_seconds / slow_start_time_seconds)“. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + Aggression *v32.RuntimeDouble `protobuf:"bytes,2,opt,name=aggression,proto3" json:"aggression,omitempty"` + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. + MinWeightPercent *v33.Percent `protobuf:"bytes,3,opt,name=min_weight_percent,json=minWeightPercent,proto3" json:"min_weight_percent,omitempty"` +} + +func (x *Cluster_SlowStartConfig) Reset() { + *x = Cluster_SlowStartConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_SlowStartConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_SlowStartConfig) ProtoMessage() {} + +func (x *Cluster_SlowStartConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_SlowStartConfig.ProtoReflect.Descriptor instead. +func (*Cluster_SlowStartConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Cluster_SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { + if x != nil { + return x.SlowStartWindow + } + return nil +} + +func (x *Cluster_SlowStartConfig) GetAggression() *v32.RuntimeDouble { + if x != nil { + return x.Aggression + } + return nil +} + +func (x *Cluster_SlowStartConfig) GetMinWeightPercent() *v33.Percent { + if x != nil { + return x.MinWeightPercent + } + return nil +} + +// Specific configuration for the RoundRobin load balancing policy. +type Cluster_RoundRobinLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for slow start mode. 
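(Editor's aside, not part of the generated file or this diff.) The weight formula quoted in the `Aggression` comment above is easy to sanity-check numerically. A small, self-contained sketch of that arithmetic; the sample numbers are invented, only the formula comes from the comment:

package main

import (
	"fmt"
	"math"
)

// slowStartWeight applies new_weight = weight * max(minWeightPercent, timeFactor^(1/aggression)),
// where timeFactor = timeSinceStart / slowStartWindow (both in seconds).
func slowStartWeight(weight, minWeightPercent, timeSinceStart, window, aggression float64) float64 {
	timeFactor := timeSinceStart / window
	return weight * math.Max(minWeightPercent, math.Pow(timeFactor, 1/aggression))
}

func main() {
	// Halfway through a 60s window with aggression 1.0 and a 10% floor,
	// a host with weight 100 is scaled down to 50.
	fmt.Println(slowStartWeight(100, 0.10, 30, 60, 1.0))
}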
+ // If this configuration is not set, slow start will not be not enabled. + SlowStartConfig *Cluster_SlowStartConfig `protobuf:"bytes,1,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` +} + +func (x *Cluster_RoundRobinLbConfig) Reset() { + *x = Cluster_RoundRobinLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RoundRobinLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RoundRobinLbConfig) ProtoMessage() {} + +func (x *Cluster_RoundRobinLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RoundRobinLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_RoundRobinLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Cluster_RoundRobinLbConfig) GetSlowStartConfig() *Cluster_SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +// Specific configuration for the LeastRequest load balancing policy. +type Cluster_LeastRequestLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // “weight = load_balancing_weight / (active_requests + 1)^active_request_bias“ + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // “active_request_bias“ must be greater than or equal to 0.0. + // + // When “active_request_bias == 0.0“ the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When “active_request_bias > 0.0“ the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // + // This setting only takes effect if all host weights are not equal. + ActiveRequestBias *v32.RuntimeDouble `protobuf:"bytes,2,opt,name=active_request_bias,json=activeRequestBias,proto3" json:"active_request_bias,omitempty"` + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. 
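(Editor's aside, not part of the generated file or this diff.) The dynamic-weight formula in the `active_request_bias` comment above can likewise be illustrated with a few lines of arithmetic; sample numbers are invented, the formula is from the comment:

package main

import (
	"fmt"
	"math"
)

// effectiveWeight implements weight = load_balancing_weight / (active_requests + 1)^active_request_bias.
func effectiveWeight(lbWeight float64, activeRequests int, bias float64) float64 {
	return lbWeight / math.Pow(float64(activeRequests+1), bias)
}

func main() {
	// With bias 0.0 the active-request count is ignored (round-robin-like);
	// with bias 1.0 a busy host's effective weight drops sharply.
	fmt.Println(effectiveWeight(100, 9, 0.0)) // 100
	fmt.Println(effectiveWeight(100, 9, 1.0)) // 10
}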
+ SlowStartConfig *Cluster_SlowStartConfig `protobuf:"bytes,3,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` +} + +func (x *Cluster_LeastRequestLbConfig) Reset() { + *x = Cluster_LeastRequestLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LeastRequestLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LeastRequestLbConfig) ProtoMessage() {} + +func (x *Cluster_LeastRequestLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LeastRequestLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_LeastRequestLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 6} +} + +func (x *Cluster_LeastRequestLbConfig) GetChoiceCount() *wrapperspb.UInt32Value { + if x != nil { + return x.ChoiceCount + } + return nil +} + +func (x *Cluster_LeastRequestLbConfig) GetActiveRequestBias() *v32.RuntimeDouble { + if x != nil { + return x.ActiveRequestBias + } + return nil +} + +func (x *Cluster_LeastRequestLbConfig) GetSlowStartConfig() *Cluster_SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +// Specific configuration for the :ref:`RingHash` +// load balancing policy. +type Cluster_RingHashLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction Cluster_RingHashLbConfig_HashFunction `protobuf:"varint,3,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.config.cluster.v3.Cluster_RingHashLbConfig_HashFunction" json:"hash_function,omitempty"` + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. 
+ MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` +} + +func (x *Cluster_RingHashLbConfig) Reset() { + *x = Cluster_RingHashLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RingHashLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RingHashLbConfig) ProtoMessage() {} + +func (x *Cluster_RingHashLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RingHashLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_RingHashLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7} +} + +func (x *Cluster_RingHashLbConfig) GetMinimumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinimumRingSize + } + return nil +} + +func (x *Cluster_RingHashLbConfig) GetHashFunction() Cluster_RingHashLbConfig_HashFunction { + if x != nil { + return x.HashFunction + } + return Cluster_RingHashLbConfig_XX_HASH +} + +func (x *Cluster_RingHashLbConfig) GetMaximumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaximumRingSize + } + return nil +} + +// Specific configuration for the :ref:`Maglev` +// load balancing policy. +type Cluster_MaglevLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. + // Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same + // upstream as it was before. Increasing the table size reduces the amount of disruption. + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + TableSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=table_size,json=tableSize,proto3" json:"table_size,omitempty"` +} + +func (x *Cluster_MaglevLbConfig) Reset() { + *x = Cluster_MaglevLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_MaglevLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_MaglevLbConfig) ProtoMessage() {} + +func (x *Cluster_MaglevLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_MaglevLbConfig.ProtoReflect.Descriptor instead. 
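(Editor's aside, not part of the generated file or this diff.) Both hash-based policies are selected through the `lb_config` oneof whose wrapper types appear earlier in this file. A hedged sketch of wiring one of them into a Cluster, assuming the Cluster struct's oneof field is named `LbConfig` (consistent with the wrapper types above, but not shown in this excerpt) and the `wrapperspb` helpers from google.golang.org/protobuf/types/known/wrapperspb; all values are illustrative:

// Assumed imports: clusterv3, wrapperspb (see lead-in above).
ringHash := &clusterv3.Cluster_RingHashLbConfig{
	MinimumRingSize: wrapperspb.UInt64(2048), // default is 1024, capped at 8M entries
	HashFunction:    clusterv3.Cluster_RingHashLbConfig_XX_HASH,
}
maglev := &clusterv3.Cluster_MaglevLbConfig{
	TableSize: wrapperspb.UInt64(65537), // must be prime; 65537 is the documented default
}
// Only one member of the lb_config oneof can be set on a Cluster at a time.
c := &clusterv3.Cluster{
	LbConfig: &clusterv3.Cluster_RingHashLbConfig_{RingHashLbConfig: ringHash},
}
_, _ = maglev, c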
+func (*Cluster_MaglevLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 8} +} + +func (x *Cluster_MaglevLbConfig) GetTableSize() *wrapperspb.UInt64Value { + if x != nil { + return x.TableSize + } + return nil +} + +// Specific configuration for the +// :ref:`Original Destination ` +// load balancing policy. +// [#extension: envoy.clusters.original_dst] +type Cluster_OriginalDstLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When true, a HTTP header can be used to override the original dst address. The default header is + // :ref:`x-envoy-original-dst-host `. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + UseHttpHeader bool `protobuf:"varint,1,opt,name=use_http_header,json=useHttpHeader,proto3" json:"use_http_header,omitempty"` + // The http header to override destination address if :ref:`use_http_header `. + // is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used. + HttpHeaderName string `protobuf:"bytes,2,opt,name=http_header_name,json=httpHeaderName,proto3" json:"http_header_name,omitempty"` + // The port to override for the original dst address. This port + // will take precedence over filter state and header override ports + UpstreamPortOverride *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=upstream_port_override,json=upstreamPortOverride,proto3" json:"upstream_port_override,omitempty"` + // The dynamic metadata key to override destination address. + // First the request metadata is considered, then the connection one. + MetadataKey *v34.MetadataKey `protobuf:"bytes,4,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` +} + +func (x *Cluster_OriginalDstLbConfig) Reset() { + *x = Cluster_OriginalDstLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_OriginalDstLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_OriginalDstLbConfig) ProtoMessage() {} + +func (x *Cluster_OriginalDstLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_OriginalDstLbConfig.ProtoReflect.Descriptor instead. 
+func (*Cluster_OriginalDstLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 9} +} + +func (x *Cluster_OriginalDstLbConfig) GetUseHttpHeader() bool { + if x != nil { + return x.UseHttpHeader + } + return false +} + +func (x *Cluster_OriginalDstLbConfig) GetHttpHeaderName() string { + if x != nil { + return x.HttpHeaderName + } + return "" +} + +func (x *Cluster_OriginalDstLbConfig) GetUpstreamPortOverride() *wrapperspb.UInt32Value { + if x != nil { + return x.UpstreamPortOverride + } + return nil +} + +func (x *Cluster_OriginalDstLbConfig) GetMetadataKey() *v34.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +// Common configuration for all load balancer implementations. +// [#next-free-field: 9] +type Cluster_CommonLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // + // The specified percent will be truncated to the nearest 1%. + HealthyPanicThreshold *v33.Percent `protobuf:"bytes,1,opt,name=healthy_panic_threshold,json=healthyPanicThreshold,proto3" json:"healthy_panic_threshold,omitempty"` + // Types that are assignable to LocalityConfigSpecifier: + // + // *Cluster_CommonLbConfig_ZoneAwareLbConfig_ + // *Cluster_CommonLbConfig_LocalityWeightedLbConfig_ + LocalityConfigSpecifier isCluster_CommonLbConfig_LocalityConfigSpecifier `protobuf_oneof:"locality_config_specifier"` + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + UpdateMergeWindow *durationpb.Duration `protobuf:"bytes,4,opt,name=update_merge_window,json=updateMergeWindow,proto3" json:"update_merge_window,omitempty"` + // If set to true, Envoy will :ref:`exclude ` new hosts + // when computing load balancing weights until they have been health checked for the first time. + // This will have no effect unless active health checking is also configured. + IgnoreNewHostsUntilFirstHc bool `protobuf:"varint,5,opt,name=ignore_new_hosts_until_first_hc,json=ignoreNewHostsUntilFirstHc,proto3" json:"ignore_new_hosts_until_first_hc,omitempty"` + // If set to “true“, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. 
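(Editor's aside, not part of the generated file or this diff.) A short sketch of the two scalar knobs documented above, disabling panic mode and widening the update merge window; it assumes the aliases used earlier plus `typev3` for envoy/type/v3 and `durationpb` from google.golang.org/protobuf/types/known/durationpb, and the values are illustrative:

// Assumed imports: clusterv3, typev3, durationpb, time (see lead-in above).
commonLb := &clusterv3.Cluster_CommonLbConfig{
	// 0% disables the healthy panic threshold (the default is 50%).
	HealthyPanicThreshold: &typev3.Percent{Value: 0},
	// Batch health/weight/metadata updates for 5s instead of the default 1000ms.
	UpdateMergeWindow: durationpb.New(5 * time.Second),
}
_ = commonLb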
+ CloseConnectionsOnHostSetChange bool `protobuf:"varint,6,opt,name=close_connections_on_host_set_change,json=closeConnectionsOnHostSetChange,proto3" json:"close_connections_on_host_set_change,omitempty"` + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig *Cluster_CommonLbConfig_ConsistentHashingLbConfig `protobuf:"bytes,7,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` + // This controls what hosts are considered valid when using + // :ref:`host overrides `, which is used by some + // filters to modify the load balancing decision. + // + // If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is + // set with an empty set of statuses then host overrides will be ignored by the load balancing. + OverrideHostStatus *v32.HealthStatusSet `protobuf:"bytes,8,opt,name=override_host_status,json=overrideHostStatus,proto3" json:"override_host_status,omitempty"` +} + +func (x *Cluster_CommonLbConfig) Reset() { + *x = Cluster_CommonLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig.ProtoReflect.Descriptor instead. 
+func (*Cluster_CommonLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10} +} + +func (x *Cluster_CommonLbConfig) GetHealthyPanicThreshold() *v33.Percent { + if x != nil { + return x.HealthyPanicThreshold + } + return nil +} + +func (m *Cluster_CommonLbConfig) GetLocalityConfigSpecifier() isCluster_CommonLbConfig_LocalityConfigSpecifier { + if m != nil { + return m.LocalityConfigSpecifier + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetZoneAwareLbConfig() *Cluster_CommonLbConfig_ZoneAwareLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*Cluster_CommonLbConfig_ZoneAwareLbConfig_); ok { + return x.ZoneAwareLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetLocalityWeightedLbConfig() *Cluster_CommonLbConfig_LocalityWeightedLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*Cluster_CommonLbConfig_LocalityWeightedLbConfig_); ok { + return x.LocalityWeightedLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetUpdateMergeWindow() *durationpb.Duration { + if x != nil { + return x.UpdateMergeWindow + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetIgnoreNewHostsUntilFirstHc() bool { + if x != nil { + return x.IgnoreNewHostsUntilFirstHc + } + return false +} + +func (x *Cluster_CommonLbConfig) GetCloseConnectionsOnHostSetChange() bool { + if x != nil { + return x.CloseConnectionsOnHostSetChange + } + return false +} + +func (x *Cluster_CommonLbConfig) GetConsistentHashingLbConfig() *Cluster_CommonLbConfig_ConsistentHashingLbConfig { + if x != nil { + return x.ConsistentHashingLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetOverrideHostStatus() *v32.HealthStatusSet { + if x != nil { + return x.OverrideHostStatus + } + return nil +} + +type isCluster_CommonLbConfig_LocalityConfigSpecifier interface { + isCluster_CommonLbConfig_LocalityConfigSpecifier() +} + +type Cluster_CommonLbConfig_ZoneAwareLbConfig_ struct { + ZoneAwareLbConfig *Cluster_CommonLbConfig_ZoneAwareLbConfig `protobuf:"bytes,2,opt,name=zone_aware_lb_config,json=zoneAwareLbConfig,proto3,oneof"` +} + +type Cluster_CommonLbConfig_LocalityWeightedLbConfig_ struct { + LocalityWeightedLbConfig *Cluster_CommonLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,3,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3,oneof"` +} + +func (*Cluster_CommonLbConfig_ZoneAwareLbConfig_) isCluster_CommonLbConfig_LocalityConfigSpecifier() { +} + +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig_) isCluster_CommonLbConfig_LocalityConfigSpecifier() { +} + +type Cluster_RefreshRate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. 
+ MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *Cluster_RefreshRate) Reset() { + *x = Cluster_RefreshRate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RefreshRate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RefreshRate) ProtoMessage() {} + +func (x *Cluster_RefreshRate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RefreshRate.ProtoReflect.Descriptor instead. +func (*Cluster_RefreshRate) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 11} +} + +func (x *Cluster_RefreshRate) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *Cluster_RefreshRate) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +type Cluster_PreconnectPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates how many streams (rounded up) can be anticipated per-upstream for each + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting + // will only be done if the upstream is healthy and the cluster has traffic. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections preconnected. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. + // + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. 
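(Editor's aside, not part of the generated file or this diff.) The per-upstream ratio described above is simply a multiplier on streams in flight, rounded up. A small worked sketch of that arithmetic; the numbers are invented, and the 1.5 case mirrors the comment's example:

package main

import (
	"fmt"
	"math"
)

// connectionsNeeded estimates connections for non-multiplexed traffic:
// ceil(active_streams * per_upstream_preconnect_ratio).
func connectionsNeeded(activeStreams int, ratio float64) int {
	return int(math.Ceil(float64(activeStreams) * ratio))
}

func main() {
	// With 100 active HTTP/1.1 streams and a ratio of 1.5, roughly 150
	// connections are kept: 100 in use plus 50 preconnected spares.
	fmt.Println(connectionsNeeded(100, 1.5)) // 150
}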
+ PerUpstreamPreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,1,opt,name=per_upstream_preconnect_ratio,json=perUpstreamPreconnectRatio,proto3" json:"per_upstream_preconnect_ratio,omitempty"` + // Indicates how many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + // Unlike “per_upstream_preconnect_ratio“ this preconnects across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will + // only be done if there are healthy upstreams and the cluster has traffic. + // + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each + // upstream. + PredictivePreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=predictive_preconnect_ratio,json=predictivePreconnectRatio,proto3" json:"predictive_preconnect_ratio,omitempty"` +} + +func (x *Cluster_PreconnectPolicy) Reset() { + *x = Cluster_PreconnectPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_PreconnectPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_PreconnectPolicy) ProtoMessage() {} + +func (x *Cluster_PreconnectPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_PreconnectPolicy.ProtoReflect.Descriptor instead. +func (*Cluster_PreconnectPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 12} +} + +func (x *Cluster_PreconnectPolicy) GetPerUpstreamPreconnectRatio() *wrapperspb.DoubleValue { + if x != nil { + return x.PerUpstreamPreconnectRatio + } + return nil +} + +func (x *Cluster_PreconnectPolicy) GetPredictivePreconnectRatio() *wrapperspb.DoubleValue { + if x != nil { + return x.PredictivePreconnectRatio + } + return nil +} + +// Specifications for subsets. +type Cluster_LbSubsetConfig_LbSubsetSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of keys to match with the weighted cluster metadata. 
+ Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for + // choosing a host, but updating hosts is faster, especially for large numbers of hosts. + // + // If a match is found to a host, that host will be used regardless of priority levels. + // + // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in “keys“ + // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge + // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are + // present in the current configuration. + SingleHostPerSubset bool `protobuf:"varint,4,opt,name=single_host_per_subset,json=singleHostPerSubset,proto3" json:"single_host_per_subset,omitempty"` + // The behavior used when no endpoint subset matches the selected route's + // metadata. + FallbackPolicy Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy `protobuf:"varint,2,opt,name=fallback_policy,json=fallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy" json:"fallback_policy,omitempty"` + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // “fallback_keys_subset“ cannot be equal to “keys“. + FallbackKeysSubset []string `protobuf:"bytes,3,rep,name=fallback_keys_subset,json=fallbackKeysSubset,proto3" json:"fallback_keys_subset,omitempty"` +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) Reset() { + *x = Cluster_LbSubsetConfig_LbSubsetSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LbSubsetConfig_LbSubsetSelector) ProtoMessage() {} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetSelector.ProtoReflect.Descriptor instead. 
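(Editor's aside, not part of the generated file or this diff.) A hedged sketch of a selector that uses the `KEYS_SUBSET` fallback described above; `fallback_keys_subset` must be a non-empty proper subset of `keys`. The `KEYS_SUBSET` enum value is taken from the proto documentation rather than this excerpt, and the key names are invented:

// Assumed imports: clusterv3 (see lead-in above).
selector := &clusterv3.Cluster_LbSubsetConfig_LbSubsetSelector{
	Keys:           []string{"stage", "version"},
	FallbackPolicy: clusterv3.Cluster_LbSubsetConfig_LbSubsetSelector_KEYS_SUBSET,
	// On a miss, retry the subset match using only the "stage" key.
	FallbackKeysSubset: []string{"stage"},
}
_ = selector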
+func (*Cluster_LbSubsetConfig_LbSubsetSelector) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0} +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetSingleHostPerSubset() bool { + if x != nil { + return x.SingleHostPerSubset + } + return false +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy { + if x != nil { + return x.FallbackPolicy + } + return Cluster_LbSubsetConfig_LbSubsetSelector_NOT_DEFINED +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetFallbackKeysSubset() []string { + if x != nil { + return x.FallbackKeysSubset + } + return nil +} + +// Configuration for :ref:`zone aware routing +// `. +type Cluster_CommonLbConfig_ZoneAwareLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + RoutingEnabled *v33.Percent `protobuf:"bytes,1,opt,name=routing_enabled,json=routingEnabled,proto3" json:"routing_enabled,omitempty"` + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + FailTrafficOnPanic bool `protobuf:"varint,3,opt,name=fail_traffic_on_panic,json=failTrafficOnPanic,proto3" json:"fail_traffic_on_panic,omitempty"` +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) Reset() { + *x = Cluster_CommonLbConfig_ZoneAwareLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_ZoneAwareLbConfig.ProtoReflect.Descriptor instead. 
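(Editor's aside, not part of the generated file or this diff.) Zone-aware routing is selected through the `locality_config_specifier` oneof on `Cluster_CommonLbConfig` shown earlier. A hedged sketch, using the same aliases as before; the values mirror the documented defaults and are otherwise illustrative:

// Assumed imports: clusterv3, typev3, wrapperspb (see lead-in above).
zoneAware := &clusterv3.Cluster_CommonLbConfig{
	LocalityConfigSpecifier: &clusterv3.Cluster_CommonLbConfig_ZoneAwareLbConfig_{
		ZoneAwareLbConfig: &clusterv3.Cluster_CommonLbConfig_ZoneAwareLbConfig{
			RoutingEnabled:     &typev3.Percent{Value: 100}, // documented default
			MinClusterSize:     wrapperspb.UInt64(6),        // documented default
			FailTrafficOnPanic: true,
		},
	},
}
_ = zoneAware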
+func (*Cluster_CommonLbConfig_ZoneAwareLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 0} +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v33.Percent { + if x != nil { + return x.RoutingEnabled + } + return nil +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinClusterSize + } + return nil +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetFailTrafficOnPanic() bool { + if x != nil { + return x.FailTrafficOnPanic + } + return false +} + +// Configuration for :ref:`locality weighted load balancing +// ` +type Cluster_CommonLbConfig_LocalityWeightedLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) Reset() { + *x = Cluster_CommonLbConfig_LocalityWeightedLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_LocalityWeightedLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 1} +} + +// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) +type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. 
When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Reset() { + *x = Cluster_CommonLbConfig_ConsistentHashingLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_ConsistentHashingLbConfig.ProtoReflect.Descriptor instead. 
+func (*Cluster_CommonLbConfig_ConsistentHashingLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 2} +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +type LoadBalancingPolicy_Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#extension-category: envoy.load_balancing_policies] + TypedExtensionConfig *v32.TypedExtensionConfig `protobuf:"bytes,4,opt,name=typed_extension_config,json=typedExtensionConfig,proto3" json:"typed_extension_config,omitempty"` +} + +func (x *LoadBalancingPolicy_Policy) Reset() { + *x = LoadBalancingPolicy_Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalancingPolicy_Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalancingPolicy_Policy) ProtoMessage() {} + +func (x *LoadBalancingPolicy_Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalancingPolicy_Policy.ProtoReflect.Descriptor instead. +func (*LoadBalancingPolicy_Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *LoadBalancingPolicy_Policy) GetTypedExtensionConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.TypedExtensionConfig + } + return nil +} + +type UpstreamConnectionOptions_HappyEyeballsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify the IP address family to attempt connection first in happy + // eyeballs algorithm according to RFC8305#section-4. + FirstAddressFamilyVersion UpstreamConnectionOptions_FirstAddressFamilyVersion `protobuf:"varint,1,opt,name=first_address_family_version,json=firstAddressFamilyVersion,proto3,enum=envoy.config.cluster.v3.UpstreamConnectionOptions_FirstAddressFamilyVersion" json:"first_address_family_version,omitempty"` + // Specify the number of addresses of the first_address_family_version being + // attempted for connection before the other address family. 
+ FirstAddressFamilyCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=first_address_family_count,json=firstAddressFamilyCount,proto3" json:"first_address_family_count,omitempty"` +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) Reset() { + *x = UpstreamConnectionOptions_HappyEyeballsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConnectionOptions_HappyEyeballsConfig) ProtoMessage() {} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConnectionOptions_HappyEyeballsConfig.ProtoReflect.Descriptor instead. +func (*UpstreamConnectionOptions_HappyEyeballsConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyVersion() UpstreamConnectionOptions_FirstAddressFamilyVersion { + if x != nil { + return x.FirstAddressFamilyVersion + } + return UpstreamConnectionOptions_DEFAULT +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyCount() *wrapperspb.UInt32Value { + if x != nil { + return x.FirstAddressFamilyCount + } + return nil +} + +var File_envoy_config_cluster_v3_cluster_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x1a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 
0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 
0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x11, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd6, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x16, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3e, 0x0a, 0x0d, 0x61, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x14, 0x0a, + 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x52, 0x0b, 0x61, 0x6c, 0x74, 0x53, 0x74, 0x61, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x4e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x57, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x5f, 0x0a, 0x12, 0x65, 0x64, 0x73, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x6f, 0x0a, 0x21, 0x70, 0x65, 0x72, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x10, 0x01, 0x52, 0x1d, 0x70, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x09, 0x6c, 0x62, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x08, 0x6c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x0f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x68, 0x0a, + 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x18, 0x6d, + 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, + 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, + 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x83, 0x01, 0x0a, + 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x2e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x77, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6b, 0x0a, 0x15, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, + 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x12, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x8c, + 0x01, 0x0a, 0x20, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1d, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x51, 0x0a, + 0x10, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d, + 0x52, 0x0e, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x12, 0x65, 0x0a, 0x18, 0x64, 0x6e, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x52, 0x15, 0x64, 0x6e, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x44, 0x6e, 0x73, 0x54, 0x74, 0x6c, 0x12, + 0x66, 0x0a, 0x11, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x66, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x44, 0x6e, 0x73, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x64, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x41, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, + 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, + 
0x75, 0x70, 0x73, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, + 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x64, + 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x37, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x44, 0x6e, 0x73, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, + 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x77, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x57, 0x61, 0x72, 0x6d, 0x4f, 0x6e, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x56, 0x0a, 0x11, + 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x6c, 0x62, 0x5f, 0x73, + 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0e, 0x6c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x10, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x6d, 0x61, 0x67, 0x6c, 0x65, + 0x76, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x34, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x16, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, + 0x5f, 0x64, 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x13, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x25, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x14, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x68, 0x0a, 0x15, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 
0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x12, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, + 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x75, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1b, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x23, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x4f, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 
0x6c, 0x74, 0x68, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4f, 0x6e, 0x48, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x28, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x72, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x72, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x72, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x39, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6c, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, + 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x30, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, + 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 
0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x32, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x29, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x25, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x65, 0x72, 0x44, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xe6, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x98, 0x01, 0x0a, + 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 
0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x10, 0x45, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0a, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, + 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, + 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x6b, + 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x61, + 0x77, 0x61, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x41, 0x77, 0x61, 0x72, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, 0x6e, + 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xda, + 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, 0x0a, + 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 
0x5f, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, + 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, 0x3b, + 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, + 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, + 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, 0x1e, + 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, + 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, + 
0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, + 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, + 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, + 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, + 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, + 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, + 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, + 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 
0x65, 0x77, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, + 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, + 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, + 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, + 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, + 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, + 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, + 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 
0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, + 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 
0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, + 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, + 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, + 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, + 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, + 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 
0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, + 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, + 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, + 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, + 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x00, + 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, + 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, + 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, + 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbb, 0x05, 0x0a, + 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x63, + 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x73, + 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x15, 0x68, 0x61, 0x70, + 0x70, 0x79, 0x5f, 0x65, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x61, 0x70, + 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x13, 0x68, 0x61, 0x70, 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x89, 0x02, 0x0a, 0x13, 0x48, 0x61, 0x70, 0x70, 0x79, 0x45, + 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x8d, 0x01, + 0x0a, 0x1c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, + 0x1a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x66, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x17, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x38, 0x0a, 0x19, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0b, + 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x36, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, + 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, + 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 
0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_cluster_v3_cluster_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_cluster_proto_rawDescData = file_envoy_config_cluster_v3_cluster_proto_rawDesc +) + +func file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_cluster_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_cluster_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_cluster_proto_rawDescData +} + +var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ + (Cluster_DiscoveryType)(0), // 0: envoy.config.cluster.v3.Cluster.DiscoveryType + (Cluster_LbPolicy)(0), // 1: envoy.config.cluster.v3.Cluster.LbPolicy + (Cluster_DnsLookupFamily)(0), // 2: envoy.config.cluster.v3.Cluster.DnsLookupFamily + (Cluster_ClusterProtocolSelection)(0), // 3: envoy.config.cluster.v3.Cluster.ClusterProtocolSelection + (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy)(0), // 4: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy)(0), // 5: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)(0), // 6: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + (Cluster_RingHashLbConfig_HashFunction)(0), // 7: envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + (UpstreamConnectionOptions_FirstAddressFamilyVersion)(0), // 8: envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + (*ClusterCollection)(nil), // 9: envoy.config.cluster.v3.ClusterCollection + (*Cluster)(nil), // 10: envoy.config.cluster.v3.Cluster + (*LoadBalancingPolicy)(nil), // 11: envoy.config.cluster.v3.LoadBalancingPolicy + (*UpstreamConnectionOptions)(nil), // 12: envoy.config.cluster.v3.UpstreamConnectionOptions + (*TrackClusterStats)(nil), // 13: envoy.config.cluster.v3.TrackClusterStats + (*Cluster_TransportSocketMatch)(nil), // 14: envoy.config.cluster.v3.Cluster.TransportSocketMatch + (*Cluster_CustomClusterType)(nil), // 15: envoy.config.cluster.v3.Cluster.CustomClusterType + (*Cluster_EdsClusterConfig)(nil), // 16: envoy.config.cluster.v3.Cluster.EdsClusterConfig + (*Cluster_LbSubsetConfig)(nil), // 17: envoy.config.cluster.v3.Cluster.LbSubsetConfig + (*Cluster_SlowStartConfig)(nil), // 18: envoy.config.cluster.v3.Cluster.SlowStartConfig + (*Cluster_RoundRobinLbConfig)(nil), // 19: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + (*Cluster_LeastRequestLbConfig)(nil), // 20: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + (*Cluster_RingHashLbConfig)(nil), // 21: envoy.config.cluster.v3.Cluster.RingHashLbConfig + (*Cluster_MaglevLbConfig)(nil), // 22: envoy.config.cluster.v3.Cluster.MaglevLbConfig + (*Cluster_OriginalDstLbConfig)(nil), // 23: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + (*Cluster_CommonLbConfig)(nil), // 24: envoy.config.cluster.v3.Cluster.CommonLbConfig + 
(*Cluster_RefreshRate)(nil), // 25: envoy.config.cluster.v3.Cluster.RefreshRate + (*Cluster_PreconnectPolicy)(nil), // 26: envoy.config.cluster.v3.Cluster.PreconnectPolicy + nil, // 27: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + (*Cluster_LbSubsetConfig_LbSubsetSelector)(nil), // 28: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + (*Cluster_CommonLbConfig_ZoneAwareLbConfig)(nil), // 29: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + (*Cluster_CommonLbConfig_LocalityWeightedLbConfig)(nil), // 30: envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + (*Cluster_CommonLbConfig_ConsistentHashingLbConfig)(nil), // 31: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + (*LoadBalancingPolicy_Policy)(nil), // 32: envoy.config.cluster.v3.LoadBalancingPolicy.Policy + (*UpstreamConnectionOptions_HappyEyeballsConfig)(nil), // 33: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + (*v3.CollectionEntry)(nil), // 34: xds.core.v3.CollectionEntry + (*durationpb.Duration)(nil), // 35: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 36: google.protobuf.UInt32Value + (*v31.ClusterLoadAssignment)(nil), // 37: envoy.config.endpoint.v3.ClusterLoadAssignment + (*v32.HealthCheck)(nil), // 38: envoy.config.core.v3.HealthCheck + (*CircuitBreakers)(nil), // 39: envoy.config.cluster.v3.CircuitBreakers + (*v32.UpstreamHttpProtocolOptions)(nil), // 40: envoy.config.core.v3.UpstreamHttpProtocolOptions + (*v32.HttpProtocolOptions)(nil), // 41: envoy.config.core.v3.HttpProtocolOptions + (*v32.Http1ProtocolOptions)(nil), // 42: envoy.config.core.v3.Http1ProtocolOptions + (*v32.Http2ProtocolOptions)(nil), // 43: envoy.config.core.v3.Http2ProtocolOptions + (*v32.Address)(nil), // 44: envoy.config.core.v3.Address + (*v32.DnsResolutionConfig)(nil), // 45: envoy.config.core.v3.DnsResolutionConfig + (*v32.TypedExtensionConfig)(nil), // 46: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 47: google.protobuf.BoolValue + (*OutlierDetection)(nil), // 48: envoy.config.cluster.v3.OutlierDetection + (*v32.BindConfig)(nil), // 49: envoy.config.core.v3.BindConfig + (*v32.TransportSocket)(nil), // 50: envoy.config.core.v3.TransportSocket + (*v32.Metadata)(nil), // 51: envoy.config.core.v3.Metadata + (*Filter)(nil), // 52: envoy.config.cluster.v3.Filter + (*v32.ConfigSource)(nil), // 53: envoy.config.core.v3.ConfigSource + (*v32.TcpKeepalive)(nil), // 54: envoy.config.core.v3.TcpKeepalive + (*structpb.Struct)(nil), // 55: google.protobuf.Struct + (*anypb.Any)(nil), // 56: google.protobuf.Any + (*v32.RuntimeDouble)(nil), // 57: envoy.config.core.v3.RuntimeDouble + (*v33.Percent)(nil), // 58: envoy.type.v3.Percent + (*wrapperspb.UInt64Value)(nil), // 59: google.protobuf.UInt64Value + (*v34.MetadataKey)(nil), // 60: envoy.type.metadata.v3.MetadataKey + (*v32.HealthStatusSet)(nil), // 61: envoy.config.core.v3.HealthStatusSet + (*wrapperspb.DoubleValue)(nil), // 62: google.protobuf.DoubleValue +} +var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ + 34, // 0: envoy.config.cluster.v3.ClusterCollection.entries:type_name -> xds.core.v3.CollectionEntry + 14, // 1: envoy.config.cluster.v3.Cluster.transport_socket_matches:type_name -> envoy.config.cluster.v3.Cluster.TransportSocketMatch + 0, // 2: envoy.config.cluster.v3.Cluster.type:type_name -> envoy.config.cluster.v3.Cluster.DiscoveryType + 15, // 3: envoy.config.cluster.v3.Cluster.cluster_type:type_name -> 
envoy.config.cluster.v3.Cluster.CustomClusterType + 16, // 4: envoy.config.cluster.v3.Cluster.eds_cluster_config:type_name -> envoy.config.cluster.v3.Cluster.EdsClusterConfig + 35, // 5: envoy.config.cluster.v3.Cluster.connect_timeout:type_name -> google.protobuf.Duration + 36, // 6: envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 1, // 7: envoy.config.cluster.v3.Cluster.lb_policy:type_name -> envoy.config.cluster.v3.Cluster.LbPolicy + 37, // 8: envoy.config.cluster.v3.Cluster.load_assignment:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment + 38, // 9: envoy.config.cluster.v3.Cluster.health_checks:type_name -> envoy.config.core.v3.HealthCheck + 36, // 10: envoy.config.cluster.v3.Cluster.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 39, // 11: envoy.config.cluster.v3.Cluster.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers + 40, // 12: envoy.config.cluster.v3.Cluster.upstream_http_protocol_options:type_name -> envoy.config.core.v3.UpstreamHttpProtocolOptions + 41, // 13: envoy.config.cluster.v3.Cluster.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 42, // 14: envoy.config.cluster.v3.Cluster.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 43, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 27, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + 35, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration + 25, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate + 2, // 19: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily + 44, // 20: envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address + 45, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 46, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 47, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue + 48, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection + 35, // 25: envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration + 49, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 17, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig + 21, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig + 22, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig + 23, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + 20, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + 19, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + 24, // 33: 
envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig + 50, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 51, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata + 3, // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection + 12, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions + 52, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter + 11, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 53, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource + 46, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats + 26, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy + 32, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy + 54, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive + 33, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.happy_eyeballs_config:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + 55, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct + 50, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 56, // 49: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any + 53, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource + 4, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + 55, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct + 28, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 5, // 54: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + 35, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 57, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 58, // 57: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 18, // 58: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 36, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value + 57, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 18, // 61: 
envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 59, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 7, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + 59, // 64: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 59, // 65: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value + 36, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value + 60, // 67: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 58, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent + 29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + 30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + 35, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration + 31, // 72: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + 61, // 73: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet + 35, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration + 35, // 75: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration + 62, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 62, // 77: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 56, // 78: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any + 6, // 79: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + 58, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 59, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 36, // 82: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 46, // 83: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 84: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_version:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + 36, // 85: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_count:type_name -> google.protobuf.UInt32Value + 86, // [86:86] is the sub-list for method output_type + 86, // [86:86] is the 
sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_cluster_proto_init() } +func file_envoy_config_cluster_v3_cluster_proto_init() { + if File_envoy_config_cluster_v3_cluster_proto != nil { + return + } + file_envoy_config_cluster_v3_circuit_breaker_proto_init() + file_envoy_config_cluster_v3_filter_proto_init() + file_envoy_config_cluster_v3_outlier_detection_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancingPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConnectionOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TrackClusterStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_TransportSocketMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CustomClusterType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_EdsClusterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LbSubsetConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_SlowStartConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Cluster_RoundRobinLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LeastRequestLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_RingHashLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_MaglevLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_OriginalDstLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_RefreshRate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_PreconnectPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LbSubsetConfig_LbSubsetSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_ZoneAwareLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_ConsistentHashingLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancingPolicy_Policy); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConnectionOptions_HappyEyeballsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Cluster_Type)(nil), + (*Cluster_ClusterType)(nil), + (*Cluster_RingHashLbConfig_)(nil), + (*Cluster_MaglevLbConfig_)(nil), + (*Cluster_OriginalDstLbConfig_)(nil), + (*Cluster_LeastRequestLbConfig_)(nil), + (*Cluster_RoundRobinLbConfig_)(nil), + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*Cluster_CommonLbConfig_ZoneAwareLbConfig_)(nil), + (*Cluster_CommonLbConfig_LocalityWeightedLbConfig_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_cluster_proto_rawDesc, + NumEnums: 9, + NumMessages: 25, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_cluster_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_cluster_proto_depIdxs, + EnumInfos: file_envoy_config_cluster_v3_cluster_proto_enumTypes, + MessageInfos: file_envoy_config_cluster_v3_cluster_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_cluster_proto = out.File + file_envoy_config_cluster_v3_cluster_proto_rawDesc = nil + file_envoy_config_cluster_v3_cluster_proto_goTypes = nil + file_envoy_config_cluster_v3_cluster_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go new file mode 100644 index 000000000..7d33f5e84 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go @@ -0,0 +1,4942 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterCollection with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ClusterCollection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterCollectionMultiError, or nil if none found. 
+func (m *ClusterCollection) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterCollection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEntries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterCollectionMultiError(errors) + } + + return nil +} + +// ClusterCollectionMultiError is an error wrapping multiple validation errors +// returned by ClusterCollection.ValidateAll() if the designated constraints +// aren't met. +type ClusterCollectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterCollectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterCollectionMultiError) AllErrors() []error { return m } + +// ClusterCollectionValidationError is the validation error returned by +// ClusterCollection.Validate if the designated constraints aren't met. +type ClusterCollectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterCollectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterCollectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterCollectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterCollectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterCollectionValidationError) ErrorName() string { + return "ClusterCollectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterCollectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterCollection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterCollectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterCollectionValidationError{} + +// Validate checks the field values on Cluster with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in ClusterMultiError, or nil if none found. +func (m *Cluster) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetTransportSocketMatches() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ClusterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AltStatName + + if all { + switch v := interface{}(m.GetEdsClusterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEdsClusterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetConnectTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "ConnectTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "ConnectTimeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetPerConnectionBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerConnectionBufferLimitBytes()).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := Cluster_LbPolicy_name[int32(m.GetLbPolicy())]; !ok { + err := ClusterValidationError{ + field: "LbPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLoadAssignment()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadAssignment()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHealthChecks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxRequestsPerConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequestsPerConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCircuitBreakers()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedExtensionProtocolOptions())) + i := 0 + for key := range m.GetTypedExtensionProtocolOptions() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedExtensionProtocolOptions()[key] + _ = val + + // no validation rules for TypedExtensionProtocolOptions[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if d := m.GetDnsRefreshRate(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "DnsRefreshRate", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "DnsRefreshRate", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetDnsFailureRefreshRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsFailureRefreshRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RespectDnsTtl + + if _, ok := Cluster_DnsLookupFamily_name[int32(m.GetDnsLookupFamily())]; !ok { + err := ClusterValidationError{ + field: "DnsLookupFamily", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDnsResolvers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors 
= append(errors, ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for UseTcpForDnsLookups + + if all { + switch v := interface{}(m.GetDnsResolutionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWaitForWarmOnInit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWaitForWarmOnInit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOutlierDetection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetCleanupInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "CleanupInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "CleanupInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetUpstreamBindConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLbSubsetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLbSubsetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ 
Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProtocolSelection + + if all { + switch v := interface{}(m.GetUpstreamConnectionOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamConnectionOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CloseConnectionsOnHostHealthFailure + + // no validation rules for IgnoreHealthOnHostRemoval + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLoadBalancingPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadBalancingPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLrsServer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLrsServer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrackTimeoutBudgets + + if all { + switch v := interface{}(m.GetUpstreamConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTrackClusterStats()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrackClusterStats()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPreconnectPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPreconnectPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConnectionPoolPerDownstreamConnection + + switch v := m.ClusterDiscoveryType.(type) { + case *Cluster_Type: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := Cluster_DiscoveryType_name[int32(m.GetType())]; !ok { + err := ClusterValidationError{ + field: "Type", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Cluster_ClusterType: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetClusterType()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterType()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + switch v := m.LbConfig.(type) { + case *Cluster_RingHashLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRingHashLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRingHashLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_MaglevLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMaglevLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + 
cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaglevLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_OriginalDstLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOriginalDstLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOriginalDstLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_LeastRequestLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLeastRequestLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLeastRequestLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_RoundRobinLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRoundRobinLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoundRobinLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ClusterMultiError(errors) + } + + 
return nil +} + +// ClusterMultiError is an error wrapping multiple validation errors returned +// by Cluster.ValidateAll() if the designated constraints aren't met. +type ClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterMultiError) AllErrors() []error { return m } + +// ClusterValidationError is the validation error returned by Cluster.Validate +// if the designated constraints aren't met. +type ClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterValidationError) ErrorName() string { return "ClusterValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterValidationError{} + +// Validate checks the field values on LoadBalancingPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LoadBalancingPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadBalancingPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadBalancingPolicyMultiError, or nil if none found. 
+func (m *LoadBalancingPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadBalancingPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LoadBalancingPolicyMultiError(errors) + } + + return nil +} + +// LoadBalancingPolicyMultiError is an error wrapping multiple validation +// errors returned by LoadBalancingPolicy.ValidateAll() if the designated +// constraints aren't met. +type LoadBalancingPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadBalancingPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadBalancingPolicyMultiError) AllErrors() []error { return m } + +// LoadBalancingPolicyValidationError is the validation error returned by +// LoadBalancingPolicy.Validate if the designated constraints aren't met. +type LoadBalancingPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadBalancingPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadBalancingPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadBalancingPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadBalancingPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadBalancingPolicyValidationError) ErrorName() string { + return "LoadBalancingPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadBalancingPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadBalancingPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadBalancingPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadBalancingPolicyValidationError{} + +// Validate checks the field values on UpstreamConnectionOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *UpstreamConnectionOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamConnectionOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamConnectionOptionsMultiError, or nil if none found. +func (m *UpstreamConnectionOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamConnectionOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTcpKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SetLocalInterfaceNameOnUpstreamConnections + + if all { + switch v := interface{}(m.GetHappyEyeballsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHappyEyeballsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpstreamConnectionOptionsMultiError(errors) + } + + return nil +} + +// UpstreamConnectionOptionsMultiError is an error wrapping multiple validation +// errors returned by UpstreamConnectionOptions.ValidateAll() if the +// designated constraints aren't met. +type UpstreamConnectionOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamConnectionOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamConnectionOptionsMultiError) AllErrors() []error { return m } + +// UpstreamConnectionOptionsValidationError is the validation error returned by +// UpstreamConnectionOptions.Validate if the designated constraints aren't met. +type UpstreamConnectionOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UpstreamConnectionOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamConnectionOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamConnectionOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamConnectionOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamConnectionOptionsValidationError) ErrorName() string { + return "UpstreamConnectionOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamConnectionOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamConnectionOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamConnectionOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamConnectionOptionsValidationError{} + +// Validate checks the field values on TrackClusterStats with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TrackClusterStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TrackClusterStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TrackClusterStatsMultiError, or nil if none found. +func (m *TrackClusterStats) ValidateAll() error { + return m.validate(true) +} + +func (m *TrackClusterStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TimeoutBudgets + + // no validation rules for RequestResponseSizes + + // no validation rules for PerEndpointStats + + if len(errors) > 0 { + return TrackClusterStatsMultiError(errors) + } + + return nil +} + +// TrackClusterStatsMultiError is an error wrapping multiple validation errors +// returned by TrackClusterStats.ValidateAll() if the designated constraints +// aren't met. +type TrackClusterStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TrackClusterStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TrackClusterStatsMultiError) AllErrors() []error { return m } + +// TrackClusterStatsValidationError is the validation error returned by +// TrackClusterStats.Validate if the designated constraints aren't met. +type TrackClusterStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TrackClusterStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TrackClusterStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TrackClusterStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e TrackClusterStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TrackClusterStatsValidationError) ErrorName() string { + return "TrackClusterStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e TrackClusterStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTrackClusterStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TrackClusterStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TrackClusterStatsValidationError{} + +// Validate checks the field values on Cluster_TransportSocketMatch with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_TransportSocketMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_TransportSocketMatch with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_TransportSocketMatchMultiError, or nil if none found. +func (m *Cluster_TransportSocketMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_TransportSocketMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Cluster_TransportSocketMatchValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if 
len(errors) > 0 { + return Cluster_TransportSocketMatchMultiError(errors) + } + + return nil +} + +// Cluster_TransportSocketMatchMultiError is an error wrapping multiple +// validation errors returned by Cluster_TransportSocketMatch.ValidateAll() if +// the designated constraints aren't met. +type Cluster_TransportSocketMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_TransportSocketMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_TransportSocketMatchMultiError) AllErrors() []error { return m } + +// Cluster_TransportSocketMatchValidationError is the validation error returned +// by Cluster_TransportSocketMatch.Validate if the designated constraints +// aren't met. +type Cluster_TransportSocketMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_TransportSocketMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_TransportSocketMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_TransportSocketMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_TransportSocketMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_TransportSocketMatchValidationError) ErrorName() string { + return "Cluster_TransportSocketMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_TransportSocketMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_TransportSocketMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_TransportSocketMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_TransportSocketMatchValidationError{} + +// Validate checks the field values on Cluster_CustomClusterType with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_CustomClusterType) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_CustomClusterType with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_CustomClusterTypeMultiError, or nil if none found. 
+func (m *Cluster_CustomClusterType) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CustomClusterType) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Cluster_CustomClusterTypeValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_CustomClusterTypeMultiError(errors) + } + + return nil +} + +// Cluster_CustomClusterTypeMultiError is an error wrapping multiple validation +// errors returned by Cluster_CustomClusterType.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CustomClusterTypeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CustomClusterTypeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CustomClusterTypeMultiError) AllErrors() []error { return m } + +// Cluster_CustomClusterTypeValidationError is the validation error returned by +// Cluster_CustomClusterType.Validate if the designated constraints aren't met. +type Cluster_CustomClusterTypeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CustomClusterTypeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_CustomClusterTypeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CustomClusterTypeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CustomClusterTypeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_CustomClusterTypeValidationError) ErrorName() string { + return "Cluster_CustomClusterTypeValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CustomClusterTypeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CustomClusterType.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CustomClusterTypeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CustomClusterTypeValidationError{} + +// Validate checks the field values on Cluster_EdsClusterConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_EdsClusterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_EdsClusterConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_EdsClusterConfigMultiError, or nil if none found. +func (m *Cluster_EdsClusterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_EdsClusterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ServiceName + + if len(errors) > 0 { + return Cluster_EdsClusterConfigMultiError(errors) + } + + return nil +} + +// Cluster_EdsClusterConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_EdsClusterConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_EdsClusterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_EdsClusterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_EdsClusterConfigMultiError) AllErrors() []error { return m } + +// Cluster_EdsClusterConfigValidationError is the validation error returned by +// Cluster_EdsClusterConfig.Validate if the designated constraints aren't met. +type Cluster_EdsClusterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_EdsClusterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_EdsClusterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_EdsClusterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_EdsClusterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_EdsClusterConfigValidationError) ErrorName() string { + return "Cluster_EdsClusterConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_EdsClusterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_EdsClusterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_EdsClusterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_EdsClusterConfigValidationError{} + +// Validate checks the field values on Cluster_LbSubsetConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_LbSubsetConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_LbSubsetConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_LbSubsetConfigMultiError, or nil if none found. 
+func (m *Cluster_LbSubsetConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LbSubsetConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name[int32(m.GetFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfigValidationError{ + field: "FallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultSubset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultSubset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSubsetSelectors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for LocalityWeightAware + + // no validation rules for ScaleLocalityWeight + + // no validation rules for PanicModeAny + + // no validation rules for ListAsAny + + if _, ok := Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name[int32(m.GetMetadataFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfigValidationError{ + field: "MetadataFallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Cluster_LbSubsetConfigMultiError(errors) + } + + return nil +} + +// Cluster_LbSubsetConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_LbSubsetConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_LbSubsetConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LbSubsetConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_LbSubsetConfigMultiError) AllErrors() []error { return m } + +// Cluster_LbSubsetConfigValidationError is the validation error returned by +// Cluster_LbSubsetConfig.Validate if the designated constraints aren't met. +type Cluster_LbSubsetConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_LbSubsetConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LbSubsetConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LbSubsetConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LbSubsetConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LbSubsetConfigValidationError) ErrorName() string { + return "Cluster_LbSubsetConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LbSubsetConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LbSubsetConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LbSubsetConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LbSubsetConfigValidationError{} + +// Validate checks the field values on Cluster_SlowStartConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_SlowStartConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_SlowStartConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_SlowStartConfigMultiError, or nil if none found. 
+func (m *Cluster_SlowStartConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_SlowStartConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAggression()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAggression()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinWeightPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinWeightPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_SlowStartConfigMultiError(errors) + } + + return nil +} + +// Cluster_SlowStartConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_SlowStartConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_SlowStartConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_SlowStartConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_SlowStartConfigMultiError) AllErrors() []error { return m } + +// Cluster_SlowStartConfigValidationError is the validation error returned by +// Cluster_SlowStartConfig.Validate if the designated constraints aren't met. +type Cluster_SlowStartConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_SlowStartConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_SlowStartConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_SlowStartConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_SlowStartConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_SlowStartConfigValidationError) ErrorName() string { + return "Cluster_SlowStartConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_SlowStartConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_SlowStartConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_SlowStartConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_SlowStartConfigValidationError{} + +// Validate checks the field values on Cluster_RoundRobinLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RoundRobinLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RoundRobinLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RoundRobinLbConfigMultiError, or nil if none found. 
+func (m *Cluster_RoundRobinLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RoundRobinLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_RoundRobinLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_RoundRobinLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_RoundRobinLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_RoundRobinLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RoundRobinLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RoundRobinLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_RoundRobinLbConfigValidationError is the validation error returned +// by Cluster_RoundRobinLbConfig.Validate if the designated constraints aren't met. +type Cluster_RoundRobinLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RoundRobinLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RoundRobinLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RoundRobinLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RoundRobinLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_RoundRobinLbConfigValidationError) ErrorName() string { + return "Cluster_RoundRobinLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RoundRobinLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RoundRobinLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RoundRobinLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RoundRobinLbConfigValidationError{} + +// Validate checks the field values on Cluster_LeastRequestLbConfig with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_LeastRequestLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_LeastRequestLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_LeastRequestLbConfigMultiError, or nil if none found. +func (m *Cluster_LeastRequestLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LeastRequestLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetChoiceCount(); wrapper != nil { + + if wrapper.GetValue() < 2 { + err := Cluster_LeastRequestLbConfigValidationError{ + field: "ChoiceCount", + reason: "value must be greater than or equal to 2", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetActiveRequestBias()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveRequestBias()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_LeastRequestLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_LeastRequestLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_LeastRequestLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_LeastRequestLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LeastRequestLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_LeastRequestLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_LeastRequestLbConfigValidationError is the validation error returned +// by Cluster_LeastRequestLbConfig.Validate if the designated constraints +// aren't met. +type Cluster_LeastRequestLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_LeastRequestLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LeastRequestLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LeastRequestLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LeastRequestLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LeastRequestLbConfigValidationError) ErrorName() string { + return "Cluster_LeastRequestLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LeastRequestLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LeastRequestLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LeastRequestLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LeastRequestLbConfigValidationError{} + +// Validate checks the field values on Cluster_RingHashLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RingHashLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RingHashLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RingHashLbConfigMultiError, or nil if none found. 
+func (m *Cluster_RingHashLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RingHashLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMinimumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := Cluster_RingHashLbConfigValidationError{ + field: "MinimumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if _, ok := Cluster_RingHashLbConfig_HashFunction_name[int32(m.GetHashFunction())]; !ok { + err := Cluster_RingHashLbConfigValidationError{ + field: "HashFunction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMaximumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := Cluster_RingHashLbConfigValidationError{ + field: "MaximumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_RingHashLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_RingHashLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_RingHashLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_RingHashLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RingHashLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RingHashLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_RingHashLbConfigValidationError is the validation error returned by +// Cluster_RingHashLbConfig.Validate if the designated constraints aren't met. +type Cluster_RingHashLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RingHashLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RingHashLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RingHashLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RingHashLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_RingHashLbConfigValidationError) ErrorName() string { + return "Cluster_RingHashLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RingHashLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RingHashLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RingHashLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RingHashLbConfigValidationError{} + +// Validate checks the field values on Cluster_MaglevLbConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_MaglevLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_MaglevLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_MaglevLbConfigMultiError, or nil if none found. +func (m *Cluster_MaglevLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_MaglevLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetTableSize(); wrapper != nil { + + if wrapper.GetValue() > 5000011 { + err := Cluster_MaglevLbConfigValidationError{ + field: "TableSize", + reason: "value must be less than or equal to 5000011", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_MaglevLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_MaglevLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_MaglevLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_MaglevLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_MaglevLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_MaglevLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_MaglevLbConfigValidationError is the validation error returned by +// Cluster_MaglevLbConfig.Validate if the designated constraints aren't met. +type Cluster_MaglevLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_MaglevLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_MaglevLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_MaglevLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_MaglevLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_MaglevLbConfigValidationError) ErrorName() string { + return "Cluster_MaglevLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_MaglevLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_MaglevLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_MaglevLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_MaglevLbConfigValidationError{} + +// Validate checks the field values on Cluster_OriginalDstLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *Cluster_OriginalDstLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_OriginalDstLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_OriginalDstLbConfigMultiError, or nil if none found. +func (m *Cluster_OriginalDstLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_OriginalDstLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHttpHeader + + // no validation rules for HttpHeaderName + + if wrapper := m.GetUpstreamPortOverride(); wrapper != nil { + + if wrapper.GetValue() > 65535 { + err := Cluster_OriginalDstLbConfigValidationError{ + field: "UpstreamPortOverride", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_OriginalDstLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_OriginalDstLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_OriginalDstLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_OriginalDstLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_OriginalDstLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_OriginalDstLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_OriginalDstLbConfigValidationError is the validation error returned +// by Cluster_OriginalDstLbConfig.Validate if the designated constraints +// aren't met. +type Cluster_OriginalDstLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_OriginalDstLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_OriginalDstLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_OriginalDstLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_OriginalDstLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_OriginalDstLbConfigValidationError) ErrorName() string { + return "Cluster_OriginalDstLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_OriginalDstLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_OriginalDstLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_OriginalDstLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_OriginalDstLbConfigValidationError{} + +// Validate checks the field values on Cluster_CommonLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_CommonLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_CommonLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHealthyPanicThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthyPanicThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpdateMergeWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpdateMergeWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IgnoreNewHostsUntilFirstHc + + // no validation rules for CloseConnectionsOnHostSetChange + + if all { + switch v := interface{}(m.GetConsistentHashingLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != 
nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsistentHashingLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverrideHostStatus()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideHostStatus()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.LocalityConfigSpecifier.(type) { + case *Cluster_CommonLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetZoneAwareLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_CommonLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: 
"LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return Cluster_CommonLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_CommonLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_CommonLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfigValidationError is the validation error returned by +// Cluster_CommonLbConfig.Validate if the designated constraints aren't met. +type Cluster_CommonLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_CommonLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CommonLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfigValidationError{} + +// Validate checks the field values on Cluster_RefreshRate with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RefreshRate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RefreshRate with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RefreshRateMultiError, or nil if none found. 
+func (m *Cluster_RefreshRate) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RefreshRate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Cluster_RefreshRateValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := Cluster_RefreshRateValidationError{ + field: "MaxInterval", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Cluster_RefreshRateMultiError(errors) + } + + return nil +} + +// Cluster_RefreshRateMultiError is an error wrapping multiple validation +// errors returned by Cluster_RefreshRate.ValidateAll() if the designated +// constraints aren't met. +type Cluster_RefreshRateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RefreshRateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RefreshRateMultiError) AllErrors() []error { return m } + +// Cluster_RefreshRateValidationError is the validation error returned by +// Cluster_RefreshRate.Validate if the designated constraints aren't met. +type Cluster_RefreshRateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RefreshRateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RefreshRateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RefreshRateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RefreshRateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_RefreshRateValidationError) ErrorName() string { + return "Cluster_RefreshRateValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RefreshRateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RefreshRate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RefreshRateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RefreshRateValidationError{} + +// Validate checks the field values on Cluster_PreconnectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_PreconnectPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_PreconnectPolicy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_PreconnectPolicyMultiError, or nil if none found. +func (m *Cluster_PreconnectPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_PreconnectPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetPerUpstreamPreconnectRatio(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 3 { + err := Cluster_PreconnectPolicyValidationError{ + field: "PerUpstreamPreconnectRatio", + reason: "value must be inside range [1, 3]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetPredictivePreconnectRatio(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 3 { + err := Cluster_PreconnectPolicyValidationError{ + field: "PredictivePreconnectRatio", + reason: "value must be inside range [1, 3]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_PreconnectPolicyMultiError(errors) + } + + return nil +} + +// Cluster_PreconnectPolicyMultiError is an error wrapping multiple validation +// errors returned by Cluster_PreconnectPolicy.ValidateAll() if the designated +// constraints aren't met. +type Cluster_PreconnectPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_PreconnectPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_PreconnectPolicyMultiError) AllErrors() []error { return m } + +// Cluster_PreconnectPolicyValidationError is the validation error returned by +// Cluster_PreconnectPolicy.Validate if the designated constraints aren't met. +type Cluster_PreconnectPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_PreconnectPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_PreconnectPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Cluster_PreconnectPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_PreconnectPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_PreconnectPolicyValidationError) ErrorName() string { + return "Cluster_PreconnectPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_PreconnectPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_PreconnectPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_PreconnectPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_PreconnectPolicyValidationError{} + +// Validate checks the field values on Cluster_LbSubsetConfig_LbSubsetSelector +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_LbSubsetConfig_LbSubsetSelector with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Cluster_LbSubsetConfig_LbSubsetSelectorMultiError, or nil if none found. +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SingleHostPerSubset + + if _, ok := Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_name[int32(m.GetFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{ + field: "FallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Cluster_LbSubsetConfig_LbSubsetSelectorMultiError(errors) + } + + return nil +} + +// Cluster_LbSubsetConfig_LbSubsetSelectorMultiError is an error wrapping +// multiple validation errors returned by +// Cluster_LbSubsetConfig_LbSubsetSelector.ValidateAll() if the designated +// constraints aren't met. +type Cluster_LbSubsetConfig_LbSubsetSelectorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LbSubsetConfig_LbSubsetSelectorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_LbSubsetConfig_LbSubsetSelectorMultiError) AllErrors() []error { return m } + +// Cluster_LbSubsetConfig_LbSubsetSelectorValidationError is the validation +// error returned by Cluster_LbSubsetConfig_LbSubsetSelector.Validate if the +// designated constraints aren't met. +type Cluster_LbSubsetConfig_LbSubsetSelectorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) ErrorName() string { + return "Cluster_LbSubsetConfig_LbSubsetSelectorValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LbSubsetConfig_LbSubsetSelector.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{} + +// Validate checks the field values on Cluster_CommonLbConfig_ZoneAwareLbConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_ZoneAwareLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError, or nil if none found. 
+func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRoutingEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoutingEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinClusterSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinClusterSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FailTrafficOnPanic + + if len(errors) > 0 { + return Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError is an error wrapping +// multiple validation errors returned by +// Cluster_CommonLbConfig_ZoneAwareLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError is the validation +// error returned by Cluster_CommonLbConfig_ZoneAwareLbConfig.Validate if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_ZoneAwareLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{} + +// Validate checks the field values on +// Cluster_CommonLbConfig_LocalityWeightedLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_LocalityWeightedLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError is an error +// wrapping multiple validation errors returned by +// Cluster_CommonLbConfig_LocalityWeightedLbConfig.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError is the +// validation error returned by +// Cluster_CommonLbConfig_LocalityWeightedLbConfig.Validate if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_LocalityWeightedLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError{} + +// Validate checks the field values on +// Cluster_CommonLbConfig_ConsistentHashingLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_ConsistentHashingLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError is an error +// wrapping multiple validation errors returned by +// Cluster_CommonLbConfig_ConsistentHashingLbConfig.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError is the +// validation error returned by +// Cluster_CommonLbConfig_ConsistentHashingLbConfig.Validate if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_ConsistentHashingLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{} + +// Validate checks the field values on LoadBalancingPolicy_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LoadBalancingPolicy_Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadBalancingPolicy_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadBalancingPolicy_PolicyMultiError, or nil if none found. 
+func (m *LoadBalancingPolicy_Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadBalancingPolicy_Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTypedExtensionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedExtensionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return LoadBalancingPolicy_PolicyMultiError(errors) + } + + return nil +} + +// LoadBalancingPolicy_PolicyMultiError is an error wrapping multiple +// validation errors returned by LoadBalancingPolicy_Policy.ValidateAll() if +// the designated constraints aren't met. +type LoadBalancingPolicy_PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadBalancingPolicy_PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadBalancingPolicy_PolicyMultiError) AllErrors() []error { return m } + +// LoadBalancingPolicy_PolicyValidationError is the validation error returned +// by LoadBalancingPolicy_Policy.Validate if the designated constraints aren't met. +type LoadBalancingPolicy_PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadBalancingPolicy_PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadBalancingPolicy_PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadBalancingPolicy_PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadBalancingPolicy_PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadBalancingPolicy_PolicyValidationError) ErrorName() string { + return "LoadBalancingPolicy_PolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadBalancingPolicy_PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadBalancingPolicy_Policy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadBalancingPolicy_PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadBalancingPolicy_PolicyValidationError{} + +// Validate checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError, or nil if none found. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FirstAddressFamilyVersion + + if wrapper := m.GetFirstAddressFamilyCount(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := UpstreamConnectionOptions_HappyEyeballsConfigValidationError{ + field: "FirstAddressFamilyCount", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return UpstreamConnectionOptions_HappyEyeballsConfigMultiError(errors) + } + + return nil +} + +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError is an error wrapping +// multiple validation errors returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.ValidateAll() if the +// designated constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) AllErrors() []error { return m } + +// UpstreamConnectionOptions_HappyEyeballsConfigValidationError is the +// validation error returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.Validate if the designated +// constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) ErrorName() string { + return "UpstreamConnectionOptions_HappyEyeballsConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamConnectionOptions_HappyEyeballsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go new file mode 100644 index 000000000..4e2c50887 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go @@ -0,0 +1,3439 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterCollection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Entries != nil { + if vtmsg, ok := interface{}(m.Entries).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_TransportSocketMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_TransportSocketMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_TransportSocketMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Match != nil { + size, err := (*structpb.Struct)(m.Match).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CustomClusterType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CustomClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CustomClusterType) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_EdsClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_EdsClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_EdsClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.EdsConfig != nil { + if vtmsg, ok := interface{}(m.EdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SingleHostPerSubset { + i-- + if m.SingleHostPerSubset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.FallbackKeysSubset) > 0 { + for iNdEx := len(m.FallbackKeysSubset) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.FallbackKeysSubset[iNdEx]) + copy(dAtA[i:], m.FallbackKeysSubset[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FallbackKeysSubset[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { 
+ i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataFallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MetadataFallbackPolicy)) + i-- + dAtA[i] = 0x40 + } + if m.ListAsAny { + i-- + if m.ListAsAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PanicModeAny { + i-- + if m.PanicModeAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.ScaleLocalityWeight { + i-- + if m.ScaleLocalityWeight { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalityWeightAware { + i-- + if m.LocalityWeightAware { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.SubsetSelectors) > 0 { + for iNdEx := len(m.SubsetSelectors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubsetSelectors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.DefaultSubset != nil { + size, err := (*structpb.Struct)(m.DefaultSubset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_SlowStartConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } 
+ if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RingHashLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RingHashLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x18 + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_MaglevLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_MaglevLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TableSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.TableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamPortOverride != nil { + size, err := (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.HttpHeaderName) > 0 { + i -= len(m.HttpHeaderName) + copy(dAtA[i:], m.HttpHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HttpHeaderName))) + i-- + dAtA[i] = 0x12 + } + if m.UseHttpHeader { + i-- + if m.UseHttpHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == 
nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OverrideHostStatus != nil { + if vtmsg, ok := interface{}(m.OverrideHostStatus).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverrideHostStatus) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ConsistentHashingLbConfig != nil { + size, err := m.ConsistentHashingLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.CloseConnectionsOnHostSetChange { + i-- + if m.CloseConnectionsOnHostSetChange { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IgnoreNewHostsUntilFirstHc { + i-- + if m.IgnoreNewHostsUntilFirstHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.UpdateMergeWindow != nil { + size, err := (*durationpb.Duration)(m.UpdateMergeWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_ZoneAwareLbConfig_); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.HealthyPanicThreshold != nil { + if vtmsg, ok := interface{}(m.HealthyPanicThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthyPanicThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Cluster_RefreshRate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RefreshRate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RefreshRate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_PreconnectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *Cluster_PreconnectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_PreconnectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PredictivePreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PerUpstreamPreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for iNdEx := len(m.LrsReportEndpointMetrics) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LrsReportEndpointMetrics[iNdEx]) + copy(dAtA[i:], m.LrsReportEndpointMetrics[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LrsReportEndpointMetrics[iNdEx]))) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xca + } + } + if msg, ok := m.LbConfig.(*Cluster_RoundRobinLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xba + } + if m.WaitForWarmOnInit != nil { + size, err := (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if msg, ok := m.LbConfig.(*Cluster_MaglevLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ConnectionPoolPerDownstreamConnection { + i-- + if m.ConnectionPoolPerDownstreamConnection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.PreconnectPolicy != nil { + size, err := m.PreconnectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.TrackClusterStats != nil { + size, err := m.TrackClusterStats.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.UpstreamConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.TrackTimeoutBudgets { + i-- + if m.TrackTimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.DnsFailureRefreshRate != nil { + size, err := m.DnsFailureRefreshRate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if len(m.TransportSocketMatches) > 0 { + for iNdEx := len(m.TransportSocketMatches) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TransportSocketMatches[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + } + if m.LrsServer != nil { + if vtmsg, ok := interface{}(m.LrsServer).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + 
encoded, err := proto.Marshal(m.LrsServer) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if m.LoadBalancingPolicy != nil { + size, err := m.LoadBalancingPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + } + if m.RespectDnsTtl { + i-- + if m.RespectDnsTtl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_ClusterType); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*Cluster_LeastRequestLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k := range m.TypedExtensionProtocolOptions { + v := m.TypedExtensionProtocolOptions[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + } + if msg, ok := m.LbConfig.(*Cluster_OriginalDstLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadAssignment != nil { + if vtmsg, ok := interface{}(m.LoadAssignment).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadAssignment) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.IgnoreHealthOnHostRemoval { + i-- + if m.IgnoreHealthOnHostRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.CloseConnectionsOnHostHealthFailure { + i-- + if m.CloseConnectionsOnHostHealthFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamConnectionOptions != nil { + size, err := m.UpstreamConnectionOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.AltStatName) > 0 { + i -= len(m.AltStatName) + copy(dAtA[i:], m.AltStatName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AltStatName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.CommonLbConfig != nil { + size, err := m.CommonLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.ProtocolSelection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolSelection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.LbConfig.(*Cluster_RingHashLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LbSubsetConfig != nil { + size, err := m.LbSubsetConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CleanupInterval != nil { + size, err := (*durationpb.Duration)(m.CleanupInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err 
+ } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.DnsResolvers) > 0 { + for iNdEx := len(m.DnsResolvers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DnsResolvers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolvers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.DnsLookupFamily != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DnsLookupFamily)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.DnsRefreshRate != nil { + size, err := (*durationpb.Duration)(m.DnsRefreshRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.CircuitBreakers != nil { + size, err := m.CircuitBreakers.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.HealthChecks) > 0 { + for iNdEx := len(m.HealthChecks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HealthChecks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthChecks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.LbPolicy != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.LbPolicy)) + i-- + dAtA[i] = 0x30 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.ConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.ConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.EdsClusterConfig != nil { + size, err := m.EdsClusterConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_Type); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_Type) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_Type) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *Cluster_RingHashLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RingHashLbConfig != nil { + size, err := m.RingHashLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *Cluster_OriginalDstLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OriginalDstLbConfig != nil { + size, err := m.OriginalDstLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Cluster_LeastRequestLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LeastRequestLbConfig != nil { + size, err := m.LeastRequestLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + 
i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} +func (m *Cluster_ClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_ClusterType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterType != nil { + size, err := m.ClusterType.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_MaglevLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaglevLbConfig != nil { + size, err := m.MaglevLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_RoundRobinLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RoundRobinLbConfig != nil { + size, err := m.RoundRobinLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + return len(dAtA) - i, nil +} +func (m *LoadBalancingPolicy_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedExtensionConfig != nil { + if vtmsg, ok := interface{}(m.TypedExtensionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedExtensionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancingPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Policies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FirstAddressFamilyCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FirstAddressFamilyVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FirstAddressFamilyVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HappyEyeballsConfig != nil { + size, err := m.HappyEyeballsConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + i-- + if m.SetLocalInterfaceNameOnUpstreamConnections { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TcpKeepalive != nil { + if vtmsg, ok := interface{}(m.TcpKeepalive).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.TcpKeepalive) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TrackClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrackClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TrackClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PerEndpointStats { + i-- + if m.PerEndpointStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RequestResponseSizes { + i-- + if m.RequestResponseSizes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TimeoutBudgets { + i-- + if m.TimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entries != nil { + if size, ok := interface{}(m.Entries).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Entries) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_TransportSocketMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + l = (*structpb.Struct)(m.Match).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CustomClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_EdsClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EdsConfig != nil { + if size, ok := interface{}(m.EdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + 
} + if len(m.FallbackKeysSubset) > 0 { + for _, s := range m.FallbackKeysSubset { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SingleHostPerSubset { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + } + if m.DefaultSubset != nil { + l = (*structpb.Struct)(m.DefaultSubset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubsetSelectors) > 0 { + for _, e := range m.SubsetSelectors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalityWeightAware { + n += 2 + } + if m.ScaleLocalityWeight { + n += 2 + } + if m.PanicModeAny { + n += 2 + } + if m.ListAsAny { + n += 2 + } + if m.MetadataFallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MetadataFallbackPolicy)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RoundRobinLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LeastRequestLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RingHashLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_MaglevLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TableSize != nil { + l = (*wrapperspb.UInt64Value)(m.TableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Cluster_OriginalDstLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHttpHeader { + n += 2 + } + l = len(m.HttpHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamPortOverride != nil { + l = (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HealthyPanicThreshold != nil { + if size, ok := interface{}(m.HealthyPanicThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HealthyPanicThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.UpdateMergeWindow != nil { + l = (*durationpb.Duration)(m.UpdateMergeWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IgnoreNewHostsUntilFirstHc { + n += 2 + } + if m.CloseConnectionsOnHostSetChange { + n += 2 + } + if m.ConsistentHashingLbConfig != nil { + l = m.ConsistentHashingLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideHostStatus != nil { + if size, ok := interface{}(m.OverrideHostStatus).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverrideHostStatus) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_RefreshRate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_PreconnectPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PerUpstreamPreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PredictivePreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ClusterDiscoveryType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.EdsClusterConfig != nil { + l = m.EdsClusterConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectTimeout != nil { + l = (*durationpb.Duration)(m.ConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LbPolicy)) + } + if len(m.HealthChecks) > 0 { + for _, e := range m.HealthChecks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + l = m.CircuitBreakers.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsRefreshRate != nil { + l = (*durationpb.Duration)(m.DnsRefreshRate).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsLookupFamily != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DnsLookupFamily)) + } + if len(m.DnsResolvers) > 0 { + for _, e := range m.DnsResolvers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CleanupInterval != nil { + l = (*durationpb.Duration)(m.CleanupInterval).SizeVT() 
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbSubsetConfig != nil { + l = m.LbSubsetConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolSelection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ProtocolSelection)) + } + if m.CommonLbConfig != nil { + l = m.CommonLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AltStatName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamConnectionOptions != nil { + l = m.UpstreamConnectionOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CloseConnectionsOnHostHealthFailure { + n += 3 + } + if m.IgnoreHealthOnHostRemoval { + n += 3 + } + if m.LoadAssignment != nil { + if size, ok := interface{}(m.LoadAssignment).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadAssignment) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k, v := range m.TypedExtensionProtocolOptions { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RespectDnsTtl { + n += 3 + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingPolicy != nil { + l = m.LoadBalancingPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LrsServer != nil { + if size, ok := interface{}(m.LrsServer).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LrsServer) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TransportSocketMatches) > 0 { + for _, e := range m.TransportSocketMatches { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsFailureRefreshRate != nil { + l = m.DnsFailureRefreshRate.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if m.UpstreamHttpProtocolOptions != nil { + if size, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamHttpProtocolOptions) + } + n 
+= 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackTimeoutBudgets { + n += 3 + } + if m.UpstreamConfig != nil { + if size, ok := interface{}(m.UpstreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackClusterStats != nil { + l = m.TrackClusterStats.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreconnectPolicy != nil { + l = m.PreconnectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionPoolPerDownstreamConnection { + n += 3 + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WaitForWarmOnInit != nil { + l = (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for _, s := range m.LrsReportEndpointMetrics { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_Type) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + return n +} +func (m *Cluster_RingHashLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RingHashLbConfig != nil { + l = m.RingHashLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_OriginalDstLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OriginalDstLbConfig != nil { + l = m.OriginalDstLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_LeastRequestLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeastRequestLbConfig != nil { + l = m.LeastRequestLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_ClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterType != nil { + l = m.ClusterType.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_MaglevLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaglevLbConfig != nil { + l = m.MaglevLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_RoundRobinLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoundRobinLbConfig != nil { + l = m.RoundRobinLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *LoadBalancingPolicy_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedExtensionConfig != nil { + if size, ok := interface{}(m.TypedExtensionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TypedExtensionConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadBalancingPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FirstAddressFamilyVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FirstAddressFamilyVersion)) + } + if m.FirstAddressFamilyCount != nil { + l = (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpKeepalive != nil { + if size, ok := interface{}(m.TcpKeepalive).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TcpKeepalive) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + n += 2 + } + if m.HappyEyeballsConfig != nil { + l = m.HappyEyeballsConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TrackClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutBudgets { + n += 2 + } + if m.RequestResponseSizes { + n += 2 + } + if m.PerEndpointStats { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go new file mode 100644 index 000000000..42ddbe2bb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + // Note that Envoy's :ref:`downstream network + // filters ` are not valid upstream network filters. + // Only one of typed_config or config_discovery can be used. 
+ TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + // Only one of typed_config or config_discovery can be used. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,3,opt,name=config_discovery,json=configDiscovery,proto3" json:"config_discovery,omitempty"` +} + +func (x *Filter) Reset() { + *x = Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_filter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Filter) ProtoMessage() {} + +func (x *Filter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_filter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Filter.ProtoReflect.Descriptor instead. +func (*Filter) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_filter_proto_rawDescGZIP(), []int{0} +} + +func (x *Filter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Filter) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +func (x *Filter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x != nil { + return x.ConfigDiscovery + } + return nil +} + +var File_envoy_config_cluster_v3_filter_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_filter_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, + 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xda, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x56, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, + 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x88, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_filter_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_filter_proto_rawDescData = file_envoy_config_cluster_v3_filter_proto_rawDesc +) + +func file_envoy_config_cluster_v3_filter_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_filter_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_filter_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_filter_proto_rawDescData +} + +var file_envoy_config_cluster_v3_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_cluster_v3_filter_proto_goTypes = []interface{}{ + (*Filter)(nil), // 0: envoy.config.cluster.v3.Filter + (*anypb.Any)(nil), // 1: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 2: envoy.config.core.v3.ExtensionConfigSource +} +var file_envoy_config_cluster_v3_filter_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.Filter.typed_config:type_name -> google.protobuf.Any + 2, // 1: envoy.config.cluster.v3.Filter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_filter_proto_init() } +func file_envoy_config_cluster_v3_filter_proto_init() { + if File_envoy_config_cluster_v3_filter_proto != nil { + 
return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_filter_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_filter_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_filter_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_filter_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_filter_proto = out.File + file_envoy_config_cluster_v3_filter_proto_rawDesc = nil + file_envoy_config_cluster_v3_filter_proto_goTypes = nil + file_envoy_config_cluster_v3_filter_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go new file mode 100644 index 000000000..6de8120e0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go @@ -0,0 +1,204 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Filter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in FilterMultiError, or nil if none found. 
+func (m *Filter) ValidateAll() error { + return m.validate(true) +} + +func (m *Filter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := FilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterMultiError(errors) + } + + return nil +} + +// FilterMultiError is an error wrapping multiple validation errors returned by +// Filter.ValidateAll() if the designated constraints aren't met. +type FilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterMultiError) AllErrors() []error { return m } + +// FilterValidationError is the validation error returned by Filter.Validate if +// the designated constraints aren't met. +type FilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilterValidationError) ErrorName() string { return "FilterValidationError" } + +// Error satisfies the builtin error interface +func (e FilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go new file mode 100644 index 000000000..f253344e6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go @@ -0,0 +1,121 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = 
(*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go new file mode 100644 index 000000000..531cbd0ef --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go @@ -0,0 +1,641 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +// [#next-free-field: 26] +type OutlierDetection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of consecutive server-side error responses (for HTTP traffic, + // 5xx responses; for TCP traffic, connection failures; for Redis, failure to + // respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. + Consecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=consecutive_5xx,json=consecutive5xx,proto3" json:"consecutive_5xx,omitempty"` + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. + // Defaults to 30000ms or 30s. + BaseEjectionTime *durationpb.Duration `protobuf:"bytes,3,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` + // The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% . + // Will eject at least one host regardless of the value if :ref:`always_eject_one_host` is enabled. + MaxEjectionPercent *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. 
This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + EnforcingConsecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + EnforcingSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=enforcing_success_rate,json=enforcingSuccessRate,proto3" json:"enforcing_success_rate,omitempty"` + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + SuccessRateMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=success_rate_minimum_hosts,json=successRateMinimumHosts,proto3" json:"success_rate_minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + SuccessRateRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=success_rate_request_volume,json=successRateRequestVolume,proto3" json:"success_rate_request_volume,omitempty"` + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + SuccessRateStdevFactor *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=success_rate_stdev_factor,json=successRateStdevFactor,proto3" json:"success_rate_stdev_factor,omitempty"` + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + ConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=consecutive_gateway_failure,json=consecutiveGatewayFailure,proto3" json:"consecutive_gateway_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + EnforcingConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=enforcing_consecutive_gateway_failure,json=enforcingConsecutiveGatewayFailure,proto3" json:"enforcing_consecutive_gateway_failure,omitempty"` + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. 
+ SplitExternalLocalOriginErrors bool `protobuf:"varint,12,opt,name=split_external_local_origin_errors,json=splitExternalLocalOriginErrors,proto3" json:"split_external_local_origin_errors,omitempty"` + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + ConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=consecutive_local_origin_failure,json=consecutiveLocalOriginFailure,proto3" json:"consecutive_local_origin_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + EnforcingConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,14,opt,name=enforcing_consecutive_local_origin_failure,json=enforcingConsecutiveLocalOriginFailure,proto3" json:"enforcing_consecutive_local_origin_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + EnforcingLocalOriginSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,15,opt,name=enforcing_local_origin_success_rate,json=enforcingLocalOriginSuccessRate,proto3" json:"enforcing_local_origin_success_rate,omitempty"` + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + FailurePercentageThreshold *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=failure_percentage_threshold,json=failurePercentageThreshold,proto3" json:"failure_percentage_threshold,omitempty"` + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + EnforcingFailurePercentage *wrapperspb.UInt32Value `protobuf:"bytes,17,opt,name=enforcing_failure_percentage,json=enforcingFailurePercentage,proto3" json:"enforcing_failure_percentage,omitempty"` + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + EnforcingFailurePercentageLocalOrigin *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=enforcing_failure_percentage_local_origin,json=enforcingFailurePercentageLocalOrigin,proto3" json:"enforcing_failure_percentage_local_origin,omitempty"` + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. + // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. 
+ FailurePercentageMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,19,opt,name=failure_percentage_minimum_hosts,json=failurePercentageMinimumHosts,proto3" json:"failure_percentage_minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + FailurePercentageRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,20,opt,name=failure_percentage_request_volume,json=failurePercentageRequestVolume,proto3" json:"failure_percentage_request_volume,omitempty"` + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. If not specified, the default value (300000ms or 300s) or + // :ref:`base_ejection_time` value is applied, whatever is larger. + MaxEjectionTime *durationpb.Duration `protobuf:"bytes,21,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` + // The maximum amount of jitter to add to the ejection time, in order to prevent + // a 'thundering herd' effect where all proxies try to reconnect to host at the same time. + // See :ref:`max_ejection_time_jitter` + // Defaults to 0s. + MaxEjectionTimeJitter *durationpb.Duration `protobuf:"bytes,22,opt,name=max_ejection_time_jitter,json=maxEjectionTimeJitter,proto3" json:"max_ejection_time_jitter,omitempty"` + // If active health checking is enabled and a host is ejected by outlier detection, a successful active health check + // unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters. + // To change this default behavior set this config to “false“ where active health checking will not uneject the host. + // Defaults to true. + SuccessfulActiveHealthCheckUnejectHost *wrapperspb.BoolValue `protobuf:"bytes,23,opt,name=successful_active_health_check_uneject_host,json=successfulActiveHealthCheckUnejectHost,proto3" json:"successful_active_health_check_uneject_host,omitempty"` + // Set of host's passive monitors. + // [#not-implemented-hide:] + Monitors []*v3.TypedExtensionConfig `protobuf:"bytes,24,rep,name=monitors,proto3" json:"monitors,omitempty"` + // If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent`. + // Defaults to false. + AlwaysEjectOneHost *wrapperspb.BoolValue `protobuf:"bytes,25,opt,name=always_eject_one_host,json=alwaysEjectOneHost,proto3" json:"always_eject_one_host,omitempty"` +} + +func (x *OutlierDetection) Reset() { + *x = OutlierDetection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutlierDetection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutlierDetection) ProtoMessage() {} + +func (x *OutlierDetection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutlierDetection.ProtoReflect.Descriptor instead. 
+func (*OutlierDetection) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP(), []int{0} +} + +func (x *OutlierDetection) GetConsecutive_5Xx() *wrapperspb.UInt32Value { + if x != nil { + return x.Consecutive_5Xx + } + return nil +} + +func (x *OutlierDetection) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *OutlierDetection) GetBaseEjectionTime() *durationpb.Duration { + if x != nil { + return x.BaseEjectionTime + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionPercent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxEjectionPercent + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutive_5Xx() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutive_5Xx + } + return nil +} + +func (x *OutlierDetection) GetEnforcingSuccessRate() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingSuccessRate + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateMinimumHosts + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateRequestVolume + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateStdevFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateStdevFactor + } + return nil +} + +func (x *OutlierDetection) GetConsecutiveGatewayFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.ConsecutiveGatewayFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutiveGatewayFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutiveGatewayFailure + } + return nil +} + +func (x *OutlierDetection) GetSplitExternalLocalOriginErrors() bool { + if x != nil { + return x.SplitExternalLocalOriginErrors + } + return false +} + +func (x *OutlierDetection) GetConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.ConsecutiveLocalOriginFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutiveLocalOriginFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingLocalOriginSuccessRate() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingLocalOriginSuccessRate + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageThreshold + } + return nil +} + +func (x *OutlierDetection) GetEnforcingFailurePercentage() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingFailurePercentage + } + return nil +} + +func (x *OutlierDetection) GetEnforcingFailurePercentageLocalOrigin() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingFailurePercentageLocalOrigin + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageMinimumHosts + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageRequestVolume + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionTime() *durationpb.Duration { + if x != nil { + return x.MaxEjectionTime + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionTimeJitter() *durationpb.Duration { + if 
x != nil { + return x.MaxEjectionTimeJitter + } + return nil +} + +func (x *OutlierDetection) GetSuccessfulActiveHealthCheckUnejectHost() *wrapperspb.BoolValue { + if x != nil { + return x.SuccessfulActiveHealthCheckUnejectHost + } + return nil +} + +func (x *OutlierDetection) GetMonitors() []*v3.TypedExtensionConfig { + if x != nil { + return x.Monitors + } + return nil +} + +func (x *OutlierDetection) GetAlwaysEjectOneHost() *wrapperspb.BoolValue { + if x != nil { + return x.AlwaysEjectOneHost + } + return nil +} + +var File_envoy_config_cluster_v3_outlier_detection_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, + 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x13, 0x0a, 0x10, + 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x35, 0x78, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x3f, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x12, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x6d, + 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, + 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, + 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x17, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x5b, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x14, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x1a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x61, 0x74, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, + 0x5b, 0x0a, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x18, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x19, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x64, + 0x65, 0x76, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 
0x74, 0x65, 0x53, 0x74, 0x64, 0x65, 0x76, 0x46, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x5c, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x12, 0x78, 0x0a, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x22, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x4a, 0x0a, + 0x22, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, 0x73, 0x70, 0x6c, 0x69, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x65, 0x0a, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x12, 0x81, 0x01, 0x0a, 0x2a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x26, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x73, 0x0a, 0x23, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 
0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1f, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, + 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x67, 0x0a, 0x1c, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x12, 0x67, 0x0a, 0x1c, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, + 0x1a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x7f, 0x0a, 0x29, 0x65, + 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x65, 0x0a, 0x20, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x21, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 
0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x11, + 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x6d, 0x61, + 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x12, 0x77, 0x0a, 0x2b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x26, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, + 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x08, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x15, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x65, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x6e, 0x65, 0x48, 0x6f, 0x73, + 0x74, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x92, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x42, 0x15, 0x4f, 
0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData = file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc +) + +func file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData +} + +var file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_cluster_v3_outlier_detection_proto_goTypes = []interface{}{ + (*OutlierDetection)(nil), // 0: envoy.config.cluster.v3.OutlierDetection + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.BoolValue)(nil), // 3: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.OutlierDetection.consecutive_5xx:type_name -> google.protobuf.UInt32Value + 2, // 1: envoy.config.cluster.v3.OutlierDetection.interval:type_name -> google.protobuf.Duration + 2, // 2: envoy.config.cluster.v3.OutlierDetection.base_ejection_time:type_name -> google.protobuf.Duration + 1, // 3: envoy.config.cluster.v3.OutlierDetection.max_ejection_percent:type_name -> google.protobuf.UInt32Value + 1, // 4: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_5xx:type_name -> google.protobuf.UInt32Value + 1, // 5: envoy.config.cluster.v3.OutlierDetection.enforcing_success_rate:type_name -> google.protobuf.UInt32Value + 1, // 6: envoy.config.cluster.v3.OutlierDetection.success_rate_minimum_hosts:type_name -> google.protobuf.UInt32Value + 1, // 7: envoy.config.cluster.v3.OutlierDetection.success_rate_request_volume:type_name -> google.protobuf.UInt32Value + 1, // 8: envoy.config.cluster.v3.OutlierDetection.success_rate_stdev_factor:type_name -> google.protobuf.UInt32Value + 1, // 9: envoy.config.cluster.v3.OutlierDetection.consecutive_gateway_failure:type_name -> google.protobuf.UInt32Value + 1, // 10: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_gateway_failure:type_name -> google.protobuf.UInt32Value + 1, // 11: envoy.config.cluster.v3.OutlierDetection.consecutive_local_origin_failure:type_name -> google.protobuf.UInt32Value + 1, // 12: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_local_origin_failure:type_name -> google.protobuf.UInt32Value + 1, // 13: envoy.config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate:type_name -> google.protobuf.UInt32Value 
+ 1, // 14: envoy.config.cluster.v3.OutlierDetection.failure_percentage_threshold:type_name -> google.protobuf.UInt32Value + 1, // 15: envoy.config.cluster.v3.OutlierDetection.enforcing_failure_percentage:type_name -> google.protobuf.UInt32Value + 1, // 16: envoy.config.cluster.v3.OutlierDetection.enforcing_failure_percentage_local_origin:type_name -> google.protobuf.UInt32Value + 1, // 17: envoy.config.cluster.v3.OutlierDetection.failure_percentage_minimum_hosts:type_name -> google.protobuf.UInt32Value + 1, // 18: envoy.config.cluster.v3.OutlierDetection.failure_percentage_request_volume:type_name -> google.protobuf.UInt32Value + 2, // 19: envoy.config.cluster.v3.OutlierDetection.max_ejection_time:type_name -> google.protobuf.Duration + 2, // 20: envoy.config.cluster.v3.OutlierDetection.max_ejection_time_jitter:type_name -> google.protobuf.Duration + 3, // 21: envoy.config.cluster.v3.OutlierDetection.successful_active_health_check_uneject_host:type_name -> google.protobuf.BoolValue + 4, // 22: envoy.config.cluster.v3.OutlierDetection.monitors:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 23: envoy.config.cluster.v3.OutlierDetection.always_eject_one_host:type_name -> google.protobuf.BoolValue + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_outlier_detection_proto_init() } +func file_envoy_config_cluster_v3_outlier_detection_proto_init() { + if File_envoy_config_cluster_v3_outlier_detection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutlierDetection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_outlier_detection_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_outlier_detection_proto = out.File + file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = nil + file_envoy_config_cluster_v3_outlier_detection_proto_goTypes = nil + file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go new file mode 100644 index 000000000..966821457 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go @@ -0,0 +1,717 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OutlierDetection with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OutlierDetection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutlierDetection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OutlierDetectionMultiError, or nil if none found. +func (m *OutlierDetection) ValidateAll() error { + return m.validate(true) +} + +func (m *OutlierDetection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConsecutive_5Xx()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutive_5Xx()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "Interval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetBaseEjectionTime(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "BaseEjectionTime", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "BaseEjectionTime", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if wrapper := m.GetMaxEjectionPercent(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "MaxEjectionPercent", + reason: "value must be less than or equal to 100", + } + if !all { + 
return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingConsecutive_5Xx(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutive_5Xx", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingSuccessRate(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingSuccessRate", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetSuccessRateMinimumHosts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateMinimumHosts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRateRequestVolume()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateRequestVolume()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRateStdevFactor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateStdevFactor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConsecutiveGatewayFailure()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: 
"embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutiveGatewayFailure()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetEnforcingConsecutiveGatewayFailure(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutiveGatewayFailure", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for SplitExternalLocalOriginErrors + + if all { + switch v := interface{}(m.GetConsecutiveLocalOriginFailure()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutiveLocalOriginFailure()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetEnforcingConsecutiveLocalOriginFailure(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutiveLocalOriginFailure", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingLocalOriginSuccessRate(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingLocalOriginSuccessRate", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetFailurePercentageThreshold(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "FailurePercentageThreshold", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingFailurePercentage(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingFailurePercentage", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingFailurePercentageLocalOrigin(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingFailurePercentageLocalOrigin", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := 
interface{}(m.GetFailurePercentageMinimumHosts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailurePercentageMinimumHosts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFailurePercentageRequestVolume()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailurePercentageRequestVolume()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetMaxEjectionTime(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "MaxEjectionTime", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "MaxEjectionTime", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetMaxEjectionTimeJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxEjectionTimeJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessfulActiveHealthCheckUnejectHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", 
+ cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessfulActiveHealthCheckUnejectHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetMonitors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetAlwaysEjectOneHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlwaysEjectOneHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OutlierDetectionMultiError(errors) + } + + return nil +} + +// OutlierDetectionMultiError is an error wrapping multiple validation errors +// returned by OutlierDetection.ValidateAll() if the designated constraints +// aren't met. +type OutlierDetectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutlierDetectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutlierDetectionMultiError) AllErrors() []error { return m } + +// OutlierDetectionValidationError is the validation error returned by +// OutlierDetection.Validate if the designated constraints aren't met. +type OutlierDetectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutlierDetectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OutlierDetectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutlierDetectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutlierDetectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutlierDetectionValidationError) ErrorName() string { return "OutlierDetectionValidationError" } + +// Error satisfies the builtin error interface +func (e OutlierDetectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutlierDetection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutlierDetectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutlierDetectionValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go new file mode 100644 index 000000000..f837d56bd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go @@ -0,0 +1,456 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysEjectOneHost != nil { + size, err := (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Monitors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Monitors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + size, err := (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.MaxEjectionTimeJitter != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTimeJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.MaxEjectionTime != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.FailurePercentageRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.FailurePercentageMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } 
+ if m.EnforcingFailurePercentage != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FailurePercentageThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.EnforcingLocalOriginSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.ConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.SplitExternalLocalOriginErrors { + i-- + if m.SplitExternalLocalOriginErrors { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.EnforcingConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.ConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.SuccessRateStdevFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SuccessRateRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.SuccessRateMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.EnforcingSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.EnforcingConsecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxEjectionPercent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BaseEjectionTime != nil { + size, err := (*durationpb.Duration)(m.BaseEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Consecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Consecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BaseEjectionTime != nil { + l = (*durationpb.Duration)(m.BaseEjectionTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionPercent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateStdevFactor != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitExternalLocalOriginErrors { + n += 2 + } + if m.ConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingLocalOriginSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentage != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTime != nil { + l = (*durationpb.Duration)(m.MaxEjectionTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTimeJitter != nil { + l = (*durationpb.Duration)(m.MaxEjectionTimeJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + l = (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Monitors) > 0 { + for _, e := range m.Monitors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysEjectOneHost != nil { + l = (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go new file mode 100644 index 000000000..659879f9d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go @@ -0,0 +1,1778 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A matcher, which may traverse a matching tree in order to result in a match action. +// During matching, the tree will be traversed until a match is found, or if no match +// is found the action specified by the most specific on_no_match will be evaluated. 
+// As an on_no_match might result in another matching tree being evaluated, this process +// might repeat several times until the final OnMatch (or no match) is decided. +// +// .. note:: +// +// Please use the syntactically equivalent :ref:`matching API ` +type Matcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatcherType: + // + // *Matcher_MatcherList_ + // *Matcher_MatcherTree_ + MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"` + // Optional OnMatch to use if the matcher failed. + // If specified, the OnMatch is used, and the matcher is considered + // to have matched. + // If not specified, the matcher is considered not to have matched. + OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"` +} + +func (x *Matcher) Reset() { + *x = Matcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher) ProtoMessage() {} + +func (x *Matcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher.ProtoReflect.Descriptor instead. +func (*Matcher) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0} +} + +func (m *Matcher) GetMatcherType() isMatcher_MatcherType { + if m != nil { + return m.MatcherType + } + return nil +} + +func (x *Matcher) GetMatcherList() *Matcher_MatcherList { + if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok { + return x.MatcherList + } + return nil +} + +func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree { + if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok { + return x.MatcherTree + } + return nil +} + +func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch { + if x != nil { + return x.OnNoMatch + } + return nil +} + +type isMatcher_MatcherType interface { + isMatcher_MatcherType() +} + +type Matcher_MatcherList_ struct { + // A linear list of matchers to evaluate. + MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"` +} + +type Matcher_MatcherTree_ struct { + // A match tree to evaluate. + MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"` +} + +func (*Matcher_MatcherList_) isMatcher_MatcherType() {} + +func (*Matcher_MatcherTree_) isMatcher_MatcherType() {} + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. 
+// [#next-free-field: 11] +type MatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *MatchPredicate_OrMatch + // *MatchPredicate_AndMatch + // *MatchPredicate_NotMatch + // *MatchPredicate_AnyMatch + // *MatchPredicate_HttpRequestHeadersMatch + // *MatchPredicate_HttpRequestTrailersMatch + // *MatchPredicate_HttpResponseHeadersMatch + // *MatchPredicate_HttpResponseTrailersMatch + // *MatchPredicate_HttpRequestGenericBodyMatch + // *MatchPredicate_HttpResponseGenericBodyMatch + Rule isMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *MatchPredicate) Reset() { + *x = MatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate) ProtoMessage() {} + +func (x *MatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate.ProtoReflect.Descriptor instead. +func (*MatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{1} +} + +func (m *MatchPredicate) GetRule() isMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *MatchPredicate) GetOrMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *MatchPredicate) GetAndMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *MatchPredicate) GetNotMatch() *MatchPredicate { + if x, ok := x.GetRule().(*MatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *MatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*MatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *MatchPredicate) GetHttpRequestHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestHeadersMatch); ok { + return x.HttpRequestHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestTrailersMatch); ok { + return x.HttpRequestTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseHeadersMatch); ok { + return x.HttpResponseHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseTrailersMatch); ok { + return x.HttpResponseTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + return x.HttpRequestGenericBodyMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := 
x.GetRule().(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + return x.HttpResponseGenericBodyMatch + } + return nil +} + +type isMatchPredicate_Rule interface { + isMatchPredicate_Rule() +} + +type MatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *MatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type MatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *MatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type MatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *MatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type MatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestHeadersMatch struct { + // HTTP request headers match configuration. + HttpRequestHeadersMatch *HttpHeadersMatch `protobuf:"bytes,5,opt,name=http_request_headers_match,json=httpRequestHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestTrailersMatch struct { + // HTTP request trailers match configuration. + HttpRequestTrailersMatch *HttpHeadersMatch `protobuf:"bytes,6,opt,name=http_request_trailers_match,json=httpRequestTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseHeadersMatch struct { + // HTTP response headers match configuration. + HttpResponseHeadersMatch *HttpHeadersMatch `protobuf:"bytes,7,opt,name=http_response_headers_match,json=httpResponseHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseTrailersMatch struct { + // HTTP response trailers match configuration. + HttpResponseTrailersMatch *HttpHeadersMatch `protobuf:"bytes,8,opt,name=http_response_trailers_match,json=httpResponseTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestGenericBodyMatch struct { + // HTTP request generic body match configuration. + HttpRequestGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,9,opt,name=http_request_generic_body_match,json=httpRequestGenericBodyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseGenericBodyMatch struct { + // HTTP response generic body match configuration. + HttpResponseGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,10,opt,name=http_response_generic_body_match,json=httpResponseGenericBodyMatch,proto3,oneof"` +} + +func (*MatchPredicate_OrMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AndMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_NotMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AnyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestGenericBodyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseGenericBodyMatch) isMatchPredicate_Rule() {} + +// HTTP headers match configuration. 
+type HttpHeadersMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // HTTP headers to match. + Headers []*v3.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HttpHeadersMatch) Reset() { + *x = HttpHeadersMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpHeadersMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpHeadersMatch) ProtoMessage() {} + +func (x *HttpHeadersMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpHeadersMatch.ProtoReflect.Descriptor instead. +func (*HttpHeadersMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpHeadersMatch) GetHeaders() []*v3.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +type HttpGenericBodyMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + BytesLimit uint32 `protobuf:"varint,1,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` + // List of patterns to match. + Patterns []*HttpGenericBodyMatch_GenericTextMatch `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *HttpGenericBodyMatch) Reset() { + *x = HttpGenericBodyMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch.ProtoReflect.Descriptor instead. 
+func (*HttpGenericBodyMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpGenericBodyMatch) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +func (x *HttpGenericBodyMatch) GetPatterns() []*HttpGenericBodyMatch_GenericTextMatch { + if x != nil { + return x.Patterns + } + return nil +} + +// What to do if a match is successful. +type Matcher_OnMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OnMatch: + // + // *Matcher_OnMatch_Matcher + // *Matcher_OnMatch_Action + OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"` +} + +func (x *Matcher_OnMatch) Reset() { + *x = Matcher_OnMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_OnMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_OnMatch) ProtoMessage() {} + +func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead. +func (*Matcher_OnMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch { + if m != nil { + return m.OnMatch + } + return nil +} + +func (x *Matcher_OnMatch) GetMatcher() *Matcher { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Matcher_OnMatch) GetAction() *v31.TypedExtensionConfig { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok { + return x.Action + } + return nil +} + +type isMatcher_OnMatch_OnMatch interface { + isMatcher_OnMatch_OnMatch() +} + +type Matcher_OnMatch_Matcher struct { + // Nested matcher to evaluate. + // If the nested matcher does not match and does not specify + // on_no_match, then this matcher is considered not to have + // matched, even if a predicate at this level or above returned + // true. + Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"` +} + +type Matcher_OnMatch_Action struct { + // Protocol-specific action to take. + Action *v31.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"` +} + +func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {} + +func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {} + +// A linear list of field matchers. +// The field matchers are evaluated in order, and the first match +// wins. +type Matcher_MatcherList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of matchers. First match wins. 
+ Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (x *Matcher_MatcherList) Reset() { + *x = Matcher_MatcherList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList) ProtoMessage() {} + +func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher { + if x != nil { + return x.Matchers + } + return nil +} + +type Matcher_MatcherTree struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Protocol-specific specification of input field to match on. + Input *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Exact or prefix match maps in which to look up the input value. + // If the lookup succeeds, the match is considered successful, and + // the corresponding OnMatch is used. + // + // Types that are assignable to TreeType: + // + // *Matcher_MatcherTree_ExactMatchMap + // *Matcher_MatcherTree_PrefixMatchMap + // *Matcher_MatcherTree_CustomMatch + TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"` +} + +func (x *Matcher_MatcherTree) Reset() { + *x = Matcher_MatcherTree{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree) ProtoMessage() {} + +func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Matcher_MatcherTree) GetInput() *v31.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType { + if m != nil { + return m.TreeType + } + return nil +} + +func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok { + return x.ExactMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok { + return x.PrefixMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetCustomMatch() *v31.TypedExtensionConfig { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherTree_TreeType interface { + isMatcher_MatcherTree_TreeType() +} + +type Matcher_MatcherTree_ExactMatchMap struct { + ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_PrefixMatchMap struct { + // Longest matching prefix wins. + PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_CustomMatch struct { + // Extension for custom matching logic. + CustomMatch *v31.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {} + +// Predicate to determine if a match is successful. +type Matcher_MatcherList_Predicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchType: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ + // *Matcher_MatcherList_Predicate_OrMatcher + // *Matcher_MatcherList_Predicate_AndMatcher + // *Matcher_MatcherList_Predicate_NotMatcher + MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"` +} + +func (x *Matcher_MatcherList_Predicate) Reset() { + *x = Matcher_MatcherList_Predicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType { + if m != nil { + return m.MatchType + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + return x.SinglePredicate + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok { + return x.OrMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok { + return x.AndMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok { + return x.NotMatcher + } + return nil +} + +type isMatcher_MatcherList_Predicate_MatchType interface { + isMatcher_MatcherList_Predicate_MatchType() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ struct { + // A single predicate to evaluate. + SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_OrMatcher struct { + // A list of predicates to be OR-ed together. + OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_AndMatcher struct { + // A list of predicates to be AND-ed together. + AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_NotMatcher struct { + // The invert of a predicate + NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +// An individual matcher. +type Matcher_MatcherList_FieldMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines if the match succeeds. + Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` + // What to do if the match succeeds. 
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Matcher_MatcherList_FieldMatcher) Reset() { + *x = Matcher_MatcherList_FieldMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_FieldMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {} + +func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +// Predicate for a single input field. +type Matcher_MatcherList_Predicate_SinglePredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Protocol-specific specification of input field to match on. + // [#extension-category: envoy.matching.common_inputs] + Input *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to Matcher: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch + // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch + Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"` +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() { + *x = Matcher_MatcherList_Predicate_SinglePredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v31.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *v32.StringMatcher { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + return x.ValueMatch + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v31.TypedExtensionConfig { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface { + isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct { + // Built-in string matcher. + ValueMatch *v32.StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct { + // Extension for custom matching logic. + // [#extension-category: envoy.matching.input_matchers] + CustomMatch *v31.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +// A list of two or more matchers. Used to allow using a list within a oneof. +type Matcher_MatcherList_Predicate_PredicateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() { + *x = Matcher_MatcherList_Predicate_PredicateList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +// A map of configured matchers. Used to allow using a map within a oneof. +type Matcher_MatcherTree_MatchMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Matcher_MatcherTree_MatchMap) Reset() { + *x = Matcher_MatcherTree_MatchMap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree_MatchMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {} + +func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch { + if x != nil { + return x.Map + } + return nil +} + +// A set of match configurations used for logical operations. +type MatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. + Rules []*MatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *MatchPredicate_MatchSet) Reset() { + *x = MatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate_MatchSet) ProtoMessage() {} + +func (x *MatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate_MatchSet.ProtoReflect.Descriptor instead. 
+func (*MatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *MatchPredicate_MatchSet) GetRules() []*MatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +type HttpGenericBodyMatch_GenericTextMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *HttpGenericBodyMatch_GenericTextMatch_StringMatch + // *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch + Rule isHttpGenericBodyMatch_GenericTextMatch_Rule `protobuf_oneof:"rule"` +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) Reset() { + *x = HttpGenericBodyMatch_GenericTextMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch_GenericTextMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch_GenericTextMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch_GenericTextMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch_GenericTextMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) GetRule() isHttpGenericBodyMatch_GenericTextMatch_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetStringMatch() string { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + return x.StringMatch + } + return "" +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetBinaryMatch() []byte { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + return x.BinaryMatch + } + return nil +} + +type isHttpGenericBodyMatch_GenericTextMatch_Rule interface { + isHttpGenericBodyMatch_GenericTextMatch_Rule() +} + +type HttpGenericBodyMatch_GenericTextMatch_StringMatch struct { + // Text string to be located in HTTP body. + StringMatch string `protobuf:"bytes,1,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type HttpGenericBodyMatch_GenericTextMatch_BinaryMatch struct { + // Sequence of bytes to be located in HTTP body. 
+ BinaryMatch []byte `protobuf:"bytes,2,opt,name=binary_match,json=binaryMatch,proto3,oneof"` +} + +func (*HttpGenericBodyMatch_GenericTextMatch_StringMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +func (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +var File_envoy_config_common_matcher_v3_matcher_proto protoreflect.FileDescriptor + +var file_envoy_config_common_matcher_v3_matcher_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, + 0x11, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x0c, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, + 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x4f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 
0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, + 0xa5, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x44, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xa2, 0x09, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, + 0xdc, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x7a, 0x0a, + 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, + 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x6c, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6e, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, 0x64, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x87, 0x02, 0x0a, 0x0f, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4a, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x0b, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x4f, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, + 0xf8, 0x42, 0x01, 0x1a, 0x76, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x65, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, + 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x11, 
0x0a, 0x0a, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xcb, + 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x65, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xe7, 0x04, 0x0a, + 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x4a, 0x0a, 0x05, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x66, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, + 0x12, 0x68, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x4f, 0x0a, 0x0c, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xd6, 0x01, 0x0a, 0x08, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x61, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x4d, 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x67, 0x0a, 0x08, 0x4d, + 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x13, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe8, 0x08, 0x0a, 0x0e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x54, + 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x56, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x4d, 0x0a, 0x09, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, + 0x6e, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x6a, 0x02, 
0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x6f, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x17, 0x68, 0x74, 0x74, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, + 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x73, 0x0a, 0x1c, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x48, 0x00, 0x52, 0x19, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x7c, 0x0a, 0x1f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, + 0x52, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x7e, 0x0a, + 0x20, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, + 0x1c, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x5a, 0x0a, + 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x12, 0x4e, 0x0a, 0x05, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x52, 0x0a, 0x10, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x14, 0x48, + 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x73, 0x1a, 0x7b, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 
0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, + 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x97, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x2c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_common_matcher_v3_matcher_proto_rawDescOnce sync.Once + file_envoy_config_common_matcher_v3_matcher_proto_rawDescData = file_envoy_config_common_matcher_v3_matcher_proto_rawDesc +) + +func file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP() []byte { + file_envoy_config_common_matcher_v3_matcher_proto_rawDescOnce.Do(func() { + file_envoy_config_common_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_common_matcher_v3_matcher_proto_rawDescData) + }) + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescData +} + +var file_envoy_config_common_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_envoy_config_common_matcher_v3_matcher_proto_goTypes = []interface{}{ + (*Matcher)(nil), // 0: envoy.config.common.matcher.v3.Matcher + (*MatchPredicate)(nil), // 1: envoy.config.common.matcher.v3.MatchPredicate + (*HttpHeadersMatch)(nil), // 2: envoy.config.common.matcher.v3.HttpHeadersMatch + (*HttpGenericBodyMatch)(nil), // 3: envoy.config.common.matcher.v3.HttpGenericBodyMatch + (*Matcher_OnMatch)(nil), // 4: envoy.config.common.matcher.v3.Matcher.OnMatch + (*Matcher_MatcherList)(nil), // 5: envoy.config.common.matcher.v3.Matcher.MatcherList + (*Matcher_MatcherTree)(nil), // 6: envoy.config.common.matcher.v3.Matcher.MatcherTree + (*Matcher_MatcherList_Predicate)(nil), // 7: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + (*Matcher_MatcherList_FieldMatcher)(nil), // 8: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher + (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 9: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 10: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + (*Matcher_MatcherTree_MatchMap)(nil), // 11: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + nil, // 12: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + (*MatchPredicate_MatchSet)(nil), // 13: envoy.config.common.matcher.v3.MatchPredicate.MatchSet + (*HttpGenericBodyMatch_GenericTextMatch)(nil), // 14: 
envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch + (*v3.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*v31.TypedExtensionConfig)(nil), // 16: envoy.config.core.v3.TypedExtensionConfig + (*v32.StringMatcher)(nil), // 17: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_config_common_matcher_v3_matcher_proto_depIdxs = []int32{ + 5, // 0: envoy.config.common.matcher.v3.Matcher.matcher_list:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList + 6, // 1: envoy.config.common.matcher.v3.Matcher.matcher_tree:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree + 4, // 2: envoy.config.common.matcher.v3.Matcher.on_no_match:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 13, // 3: envoy.config.common.matcher.v3.MatchPredicate.or_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate.MatchSet + 13, // 4: envoy.config.common.matcher.v3.MatchPredicate.and_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate.MatchSet + 1, // 5: envoy.config.common.matcher.v3.MatchPredicate.not_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 2, // 6: envoy.config.common.matcher.v3.MatchPredicate.http_request_headers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 7: envoy.config.common.matcher.v3.MatchPredicate.http_request_trailers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 8: envoy.config.common.matcher.v3.MatchPredicate.http_response_headers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 9: envoy.config.common.matcher.v3.MatchPredicate.http_response_trailers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 3, // 10: envoy.config.common.matcher.v3.MatchPredicate.http_request_generic_body_match:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch + 3, // 11: envoy.config.common.matcher.v3.MatchPredicate.http_response_generic_body_match:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch + 15, // 12: envoy.config.common.matcher.v3.HttpHeadersMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 14, // 13: envoy.config.common.matcher.v3.HttpGenericBodyMatch.patterns:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch + 0, // 14: envoy.config.common.matcher.v3.Matcher.OnMatch.matcher:type_name -> envoy.config.common.matcher.v3.Matcher + 16, // 15: envoy.config.common.matcher.v3.Matcher.OnMatch.action:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 16: envoy.config.common.matcher.v3.Matcher.MatcherList.matchers:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher + 16, // 17: envoy.config.common.matcher.v3.Matcher.MatcherTree.input:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 18: envoy.config.common.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + 11, // 19: envoy.config.common.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + 16, // 20: envoy.config.common.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> envoy.config.core.v3.TypedExtensionConfig + 9, // 21: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + 10, // 22: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> 
envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 10, // 23: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 7, // 24: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 7, // 25: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 4, // 26: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 16, // 27: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 28: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> envoy.type.matcher.v3.StringMatcher + 16, // 29: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> envoy.config.core.v3.TypedExtensionConfig + 7, // 30: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 12, // 31: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + 4, // 32: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 1, // 33: envoy.config.common.matcher.v3.MatchPredicate.MatchSet.rules:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_envoy_config_common_matcher_v3_matcher_proto_init() } +func file_envoy_config_common_matcher_v3_matcher_proto_init() { + if File_envoy_config_common_matcher_v3_matcher_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpHeadersMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Matcher_OnMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_FieldMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree_MatchMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch_GenericTextMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_)(nil), + (*Matcher_MatcherTree_)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MatchPredicate_OrMatch)(nil), + (*MatchPredicate_AndMatch)(nil), + (*MatchPredicate_NotMatch)(nil), + (*MatchPredicate_AnyMatch)(nil), + (*MatchPredicate_HttpRequestHeadersMatch)(nil), + (*MatchPredicate_HttpRequestTrailersMatch)(nil), + (*MatchPredicate_HttpResponseHeadersMatch)(nil), + (*MatchPredicate_HttpResponseTrailersMatch)(nil), + (*MatchPredicate_HttpRequestGenericBodyMatch)(nil), + 
(*MatchPredicate_HttpResponseGenericBodyMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*Matcher_OnMatch_Matcher)(nil), + (*Matcher_OnMatch_Action)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Matcher_MatcherTree_ExactMatchMap)(nil), + (*Matcher_MatcherTree_PrefixMatchMap)(nil), + (*Matcher_MatcherTree_CustomMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil), + (*Matcher_MatcherList_Predicate_OrMatcher)(nil), + (*Matcher_MatcherList_Predicate_AndMatcher)(nil), + (*Matcher_MatcherList_Predicate_NotMatcher)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil), + (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*HttpGenericBodyMatch_GenericTextMatch_StringMatch)(nil), + (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_common_matcher_v3_matcher_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_common_matcher_v3_matcher_proto_goTypes, + DependencyIndexes: file_envoy_config_common_matcher_v3_matcher_proto_depIdxs, + MessageInfos: file_envoy_config_common_matcher_v3_matcher_proto_msgTypes, + }.Build() + File_envoy_config_common_matcher_v3_matcher_proto = out.File + file_envoy_config_common_matcher_v3_matcher_proto_rawDesc = nil + file_envoy_config_common_matcher_v3_matcher_proto_goTypes = nil + file_envoy_config_common_matcher_v3_matcher_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go new file mode 100644 index 000000000..88607c30b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go @@ -0,0 +1,3044 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in MatcherMultiError, or nil if none found. +func (m *Matcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetOnNoMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherTypePresent := false + switch v := m.MatcherType.(type) { + case *Matcher_MatcherList_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true + + if all { + switch v := interface{}(m.GetMatcherList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true + + if all { + switch v := interface{}(m.GetMatcherTree()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherTypePresent { + err := MatcherValidationError{ + field: "MatcherType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatcherMultiError(errors) + } + + return nil +} + +// MatcherMultiError 
is an error wrapping multiple validation errors returned +// by Matcher.ValidateAll() if the designated constraints aren't met. +type MatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatcherMultiError) AllErrors() []error { return m } + +// MatcherValidationError is the validation error returned by Matcher.Validate +// if the designated constraints aren't met. +type MatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatcherValidationError{} + +// Validate checks the field values on MatchPredicate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MatchPredicateMultiError, +// or nil if none found. 
+func (m *MatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := MatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: 
"HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + 
+ default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatchPredicateMultiError(errors) + } + + return nil +} + +// MatchPredicateMultiError is an error wrapping multiple validation errors +// returned by MatchPredicate.ValidateAll() if the designated constraints +// aren't met. +type MatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicateMultiError) AllErrors() []error { return m } + +// MatchPredicateValidationError is the validation error returned by +// MatchPredicate.Validate if the designated constraints aren't met. +type MatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicateValidationError) ErrorName() string { return "MatchPredicateValidationError" } + +// Error satisfies the builtin error interface +func (e MatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicateValidationError{} + +// Validate checks the field values on HttpHeadersMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HttpHeadersMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpHeadersMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpHeadersMatchMultiError, or nil if none found. 
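Note (editorial, not part of the vendored file): the MatchPredicate block above shows the contract that every generated message in this file follows -- Validate returns the first violation it encounters, while ValidateAll collects all of them into a *MultiError whose elements are *ValidationError values. A minimal consumer sketch follows; the go-control-plane import path and the matcherv3 alias are assumptions, everything else is taken from the generated API above.

package main

import (
	"fmt"

	// Assumed vendor path for envoy/config/common/matcher/v3.
	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3"
)

func main() {
	// An empty MatchPredicate violates the "Rule is required" oneof constraint.
	mp := &matcherv3.MatchPredicate{}

	// Validate is fail-fast: it returns the first violation only.
	if err := mp.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll collects every violation into a MatchPredicateMultiError.
	if err := mp.ValidateAll(); err != nil {
		if multi, ok := err.(matcherv3.MatchPredicateMultiError); ok {
			for _, verr := range multi.AllErrors() {
				// Each element is a MatchPredicateValidationError exposing
				// Field(), Reason() and Cause() for structured reporting.
				if ve, ok := verr.(matcherv3.MatchPredicateValidationError); ok {
					fmt.Printf("field=%s reason=%q cause=%v\n", ve.Field(), ve.Reason(), ve.Cause())
				}
			}
		}
	}
}

The same pattern repeats for every other message in this file; only the concrete error type names change.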
+func (m *HttpHeadersMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpHeadersMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpHeadersMatchMultiError(errors) + } + + return nil +} + +// HttpHeadersMatchMultiError is an error wrapping multiple validation errors +// returned by HttpHeadersMatch.ValidateAll() if the designated constraints +// aren't met. +type HttpHeadersMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpHeadersMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpHeadersMatchMultiError) AllErrors() []error { return m } + +// HttpHeadersMatchValidationError is the validation error returned by +// HttpHeadersMatch.Validate if the designated constraints aren't met. +type HttpHeadersMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpHeadersMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpHeadersMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpHeadersMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpHeadersMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpHeadersMatchValidationError) ErrorName() string { return "HttpHeadersMatchValidationError" } + +// Error satisfies the builtin error interface +func (e HttpHeadersMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpHeadersMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpHeadersMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpHeadersMatchValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpGenericBodyMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesLimit + + if len(m.GetPatterns()) < 1 { + err := HttpGenericBodyMatchValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpGenericBodyMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatchMultiError is an error wrapping multiple validation +// errors returned by HttpGenericBodyMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatchValidationError is the validation error returned by +// HttpGenericBodyMatch.Validate if the designated constraints aren't met. +type HttpGenericBodyMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatchValidationError{} + +// Validate checks the field values on Matcher_OnMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Matcher_OnMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_OnMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_OnMatchMultiError, or nil if none found. +func (m *Matcher_OnMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_OnMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOnMatchPresent := false + switch v := m.OnMatch.(type) { + case *Matcher_OnMatch_Matcher: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_OnMatch_Action: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message 
failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOnMatchPresent { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_OnMatchMultiError(errors) + } + + return nil +} + +// Matcher_OnMatchMultiError is an error wrapping multiple validation errors +// returned by Matcher_OnMatch.ValidateAll() if the designated constraints +// aren't met. +type Matcher_OnMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_OnMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_OnMatchMultiError) AllErrors() []error { return m } + +// Matcher_OnMatchValidationError is the validation error returned by +// Matcher_OnMatch.Validate if the designated constraints aren't met. +type Matcher_OnMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_OnMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_OnMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_OnMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_OnMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" } + +// Error satisfies the builtin error interface +func (e Matcher_OnMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_OnMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_OnMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_OnMatchValidationError{} + +// Validate checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherListMultiError, or nil if none found. 
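Note (editorial, not part of the vendored file): the repeated type switches above never assume that an embedded message supports validation; they probe at runtime for the ValidateAll/Validate interfaces so that messages generated without protoc-gen-validate support are simply skipped. A self-contained sketch of that probe, using hypothetical stand-in types rather than real proto messages:

package main

import "fmt"

type shallow struct{}        // hypothetical embedded message with no validation support
type deep struct{ bad bool } // hypothetical embedded message supporting both entry points

func (d deep) Validate() error    { return d.validateImpl(false) }
func (d deep) ValidateAll() error { return d.validateImpl(true) }
func (d deep) validateImpl(all bool) error {
	if d.bad {
		return fmt.Errorf("deep: constraint violated (all=%v)", all)
	}
	return nil
}

// validateEmbedded is a simplified version of the probe the generated
// validate(all bool) helpers perform for every embedded field: prefer
// ValidateAll when collecting, fall back to Validate, and silently skip
// values that implement neither interface.
func validateEmbedded(v interface{}, all bool) error {
	if all {
		if va, ok := v.(interface{ ValidateAll() error }); ok {
			return va.ValidateAll()
		}
	}
	if vv, ok := v.(interface{ Validate() error }); ok {
		return vv.Validate()
	}
	return nil // no validation support; nothing to check
}

func main() {
	fmt.Println(validateEmbedded(shallow{}, true))       // <nil>
	fmt.Println(validateEmbedded(deep{bad: true}, true)) // deep: constraint violated (all=true)
}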
+func (m *Matcher_MatcherList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMatchers()) < 1 { + err := Matcher_MatcherListValidationError{ + field: "Matchers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherListMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherListValidationError is the validation error returned by +// Matcher_MatcherList.Validate if the designated constraints aren't met. +type Matcher_MatcherListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherListValidationError) ErrorName() string { + return "Matcher_MatcherListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTreeMultiError, or nil if none found. +func (m *Matcher_MatcherTree) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTreeTypePresent := false + switch v := m.TreeType.(type) { + case *Matcher_MatcherTree_ExactMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetExactMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_PrefixMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetPrefixMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_CustomMatch: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTreeTypePresent { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherTreeMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTreeMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherTree.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherTreeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTreeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTreeValidationError is the validation error returned by +// Matcher_MatcherTree.Validate if the designated constraints aren't met. +type Matcher_MatcherTreeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_MatcherTreeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTreeValidationError) ErrorName() string { + return "Matcher_MatcherTreeValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTreeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTreeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTreeValidationError{} + +// Validate checks the field values on Matcher_MatcherList_Predicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_Predicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_PredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchTypePresent := false + switch v := m.MatchType.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetSinglePredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_OrMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetOrMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_AndMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetAndMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_NotMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetNotMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchTypePresent { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_PredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple +// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll() +// if the designated constraints aren't met. +type Matcher_MatcherList_PredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_PredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_PredicateValidationError is the validation error +// returned by Matcher_MatcherList_Predicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_PredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_PredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_PredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_PredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_PredicateValidationError{} + +// Validate checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Matcher_MatcherList_FieldMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found. +func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPredicate() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOnMatch() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Matcher_MatcherList_FieldMatcherMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple +// validation errors returned by +// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_FieldMatcherValidationError is the validation error +// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string { + return "Matcher_MatcherList_FieldMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_FieldMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_FieldMatcherValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the +// designated constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the +// validation error returned by +// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_SinglePredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPredicate()) < 2 { + err := Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: "Predicate", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_PredicateListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation +// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if +// the designated constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_PredicateListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree_MatchMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTree_MatchMapMultiError, or nil if none found. +func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMap()) < 1 { + err := Matcher_MatcherTree_MatchMapValidationError{ + field: "Map", + reason: "value must contain at least 1 pair(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetMap())) + i := 0 + for key := range m.GetMap() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetMap()[key] + _ = val + + // no validation rules for Map[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return Matcher_MatcherTree_MatchMapMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple +// validation 
errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if +// the designated constraints aren't met. +type Matcher_MatcherTree_MatchMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTree_MatchMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTree_MatchMapValidationError is the validation error returned +// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints +// aren't met. +type Matcher_MatcherTree_MatchMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string { + return "Matcher_MatcherTree_MatchMapValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTree_MatchMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTree_MatchMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTree_MatchMapValidationError{} + +// Validate checks the field values on MatchPredicate_MatchSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate_MatchSet with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MatchPredicate_MatchSetMultiError, or nil if none found. 
+func (m *MatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := MatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// MatchPredicate_MatchSetMultiError is an error wrapping multiple validation +// errors returned by MatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type MatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// MatchPredicate_MatchSetValidationError is the validation error returned by +// MatchPredicate_MatchSet.Validate if the designated constraints aren't met. +type MatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MatchPredicate_MatchSetValidationError) ErrorName() string { + return "MatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e MatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicate_MatchSetValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpGenericBodyMatch_GenericTextMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatch_GenericTextMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch_GenericTextMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if utf8.RuneCountInString(m.GetStringMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "StringMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if len(m.GetBinaryMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "BinaryMatch", + reason: "value length must be at least 1 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpGenericBodyMatch_GenericTextMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatch_GenericTextMatchMultiError is an error wrapping +// multiple validation errors returned by +// HttpGenericBodyMatch_GenericTextMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpGenericBodyMatch_GenericTextMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatch_GenericTextMatchValidationError is the validation error +// returned by HttpGenericBodyMatch_GenericTextMatch.Validate if the +// designated constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatch_GenericTextMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch_GenericTextMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatch_GenericTextMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatch_GenericTextMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go new file mode 100644 index 000000000..431572ef8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go @@ -0,0 +1,2035 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Matcher_OnMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_OnMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Action); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + size, err := m.Matcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_OnMatch_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Action != nil { + if vtmsg, ok := interface{}(m.Action).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Action) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValueMatch != nil { + if vtmsg, ok := interface{}(m.ValueMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ValueMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Predicate) > 0 { + for iNdEx := len(m.Predicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Predicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_NotMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_AndMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_OrMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SinglePredicate != nil { + size, err := m.SinglePredicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatcher != nil { + size, err := m.OrMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if 
m.AndMatcher != nil { + size, err := m.AndMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatcher != nil { + size, err := m.NotMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_FieldMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnMatch != nil { + size, err := m.OnMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Predicate != nil { + size, err := m.Predicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Matchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Matcher_MatcherTree_MatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Map) > 0 { + for k := range m.Map { + v := m.Map[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherTree) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_CustomMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_PrefixMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_ExactMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactMatchMap != nil { + size, err := m.ExactMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_PrefixMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Matcher_MatcherTree_PrefixMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrefixMatchMap != nil { + size, err := m.PrefixMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnNoMatch != nil { + size, err := m.OnNoMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.MatcherType.(*Matcher_MatcherTree_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatcherType.(*Matcher_MatcherList_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherList != nil { + size, err := m.MatcherList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherTree != 
nil { + size, err := m.MatcherTree.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, 
err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := 
m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + 
i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OnMatch.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_OnMatch_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + l = m.Matcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_OnMatch_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + if size, ok := interface{}(m.Action).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Action) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValueMatch != nil { + if size, ok := interface{}(m.ValueMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ValueMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil { + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_PredicateList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Predicate) > 0 { + for _, e := range m.Predicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SinglePredicate != nil { + l = m.SinglePredicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*Matcher_MatcherList_Predicate_OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatcher != nil { + l = m.OrMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatcher != nil { + l = m.AndMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatcher != nil { + l = m.NotMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_FieldMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Predicate != nil { + l = m.Predicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnMatch != nil { + l = m.OnMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_MatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TreeType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_ExactMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExactMatchMap != nil { + l = m.ExactMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_PrefixMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrefixMatchMap != nil { + l = m.PrefixMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil { + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatcherType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OnNoMatch != nil { + l = m.OnNoMatch.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherList != nil { + l = m.MatcherList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherTree != nil { + l = m.MatcherTree.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go new file mode 100644 index 000000000..a0852aa60 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go @@ -0,0 +1,1110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SocketAddress_Protocol int32 + +const ( + SocketAddress_TCP SocketAddress_Protocol = 0 + SocketAddress_UDP SocketAddress_Protocol = 1 +) + +// Enum value maps for SocketAddress_Protocol. 
+var ( + SocketAddress_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + } + SocketAddress_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + } +) + +func (x SocketAddress_Protocol) Enum() *SocketAddress_Protocol { + p := new(SocketAddress_Protocol) + *p = x + return p +} + +func (x SocketAddress_Protocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SocketAddress_Protocol) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_address_proto_enumTypes[0].Descriptor() +} + +func (SocketAddress_Protocol) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_address_proto_enumTypes[0] +} + +func (x SocketAddress_Protocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SocketAddress_Protocol.Descriptor instead. +func (SocketAddress_Protocol) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2, 0} +} + +type Pipe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // The mode for the Pipe. Not applicable for abstract sockets. + Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` +} + +func (x *Pipe) Reset() { + *x = Pipe{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Pipe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Pipe) ProtoMessage() {} + +func (x *Pipe) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Pipe.ProtoReflect.Descriptor instead. +func (*Pipe) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{0} +} + +func (x *Pipe) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Pipe) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +// The address represents an envoy internal listener. +// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] +type EnvoyInternalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to AddressNameSpecifier: + // + // *EnvoyInternalAddress_ServerListenerName + AddressNameSpecifier isEnvoyInternalAddress_AddressNameSpecifier `protobuf_oneof:"address_name_specifier"` + // Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + // single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + // example, may be set to the final destination IP for the target internal listener. 
+ EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` +} + +func (x *EnvoyInternalAddress) Reset() { + *x = EnvoyInternalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnvoyInternalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnvoyInternalAddress) ProtoMessage() {} + +func (x *EnvoyInternalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnvoyInternalAddress.ProtoReflect.Descriptor instead. +func (*EnvoyInternalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{1} +} + +func (m *EnvoyInternalAddress) GetAddressNameSpecifier() isEnvoyInternalAddress_AddressNameSpecifier { + if m != nil { + return m.AddressNameSpecifier + } + return nil +} + +func (x *EnvoyInternalAddress) GetServerListenerName() string { + if x, ok := x.GetAddressNameSpecifier().(*EnvoyInternalAddress_ServerListenerName); ok { + return x.ServerListenerName + } + return "" +} + +func (x *EnvoyInternalAddress) GetEndpointId() string { + if x != nil { + return x.EndpointId + } + return "" +} + +type isEnvoyInternalAddress_AddressNameSpecifier interface { + isEnvoyInternalAddress_AddressNameSpecifier() +} + +type EnvoyInternalAddress_ServerListenerName struct { + // Specifies the :ref:`name ` of the + // internal listener. + ServerListenerName string `protobuf:"bytes,1,opt,name=server_listener_name,json=serverListenerName,proto3,oneof"` +} + +func (*EnvoyInternalAddress_ServerListenerName) isEnvoyInternalAddress_AddressNameSpecifier() {} + +// [#next-free-field: 7] +type SocketAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Protocol SocketAddress_Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"` + // The address for this socket. :ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify “0.0.0.0“ or “::“ + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (“STATIC“ or “EDS“ clusters) or a hostname resolved by DNS + // (“STRICT_DNS“ or “LOGICAL_DNS“ clusters). Address resolution can be customized + // via :ref:`resolver_name `. + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // Types that are assignable to PortSpecifier: + // + // *SocketAddress_PortValue + // *SocketAddress_NamedPort + PortSpecifier isSocketAddress_PortSpecifier `protobuf_oneof:"port_specifier"` + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. 
If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // “STRICT_DNS“ or “LOGICAL_DNS“ will generate an error at runtime. + ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"` + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to “::“ will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as “::FFFF:“. + Ipv4Compat bool `protobuf:"varint,6,opt,name=ipv4_compat,json=ipv4Compat,proto3" json:"ipv4_compat,omitempty"` +} + +func (x *SocketAddress) Reset() { + *x = SocketAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketAddress) ProtoMessage() {} + +func (x *SocketAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketAddress.ProtoReflect.Descriptor instead. +func (*SocketAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2} +} + +func (x *SocketAddress) GetProtocol() SocketAddress_Protocol { + if x != nil { + return x.Protocol + } + return SocketAddress_TCP +} + +func (x *SocketAddress) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (m *SocketAddress) GetPortSpecifier() isSocketAddress_PortSpecifier { + if m != nil { + return m.PortSpecifier + } + return nil +} + +func (x *SocketAddress) GetPortValue() uint32 { + if x, ok := x.GetPortSpecifier().(*SocketAddress_PortValue); ok { + return x.PortValue + } + return 0 +} + +func (x *SocketAddress) GetNamedPort() string { + if x, ok := x.GetPortSpecifier().(*SocketAddress_NamedPort); ok { + return x.NamedPort + } + return "" +} + +func (x *SocketAddress) GetResolverName() string { + if x != nil { + return x.ResolverName + } + return "" +} + +func (x *SocketAddress) GetIpv4Compat() bool { + if x != nil { + return x.Ipv4Compat + } + return false +} + +type isSocketAddress_PortSpecifier interface { + isSocketAddress_PortSpecifier() +} + +type SocketAddress_PortValue struct { + PortValue uint32 `protobuf:"varint,3,opt,name=port_value,json=portValue,proto3,oneof"` +} + +type SocketAddress_NamedPort struct { + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + NamedPort string `protobuf:"bytes,4,opt,name=named_port,json=namedPort,proto3,oneof"` +} + +func (*SocketAddress_PortValue) isSocketAddress_PortSpecifier() {} + +func (*SocketAddress_NamedPort) isSocketAddress_PortSpecifier() {} + +type TcpKeepalive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) 
+ KeepaliveProbes *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"` + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (i.e., 2 hours.) + KeepaliveTime *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"` + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) + KeepaliveInterval *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"` +} + +func (x *TcpKeepalive) Reset() { + *x = TcpKeepalive{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TcpKeepalive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TcpKeepalive) ProtoMessage() {} + +func (x *TcpKeepalive) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TcpKeepalive.ProtoReflect.Descriptor instead. +func (*TcpKeepalive) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{3} +} + +func (x *TcpKeepalive) GetKeepaliveProbes() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveProbes + } + return nil +} + +func (x *TcpKeepalive) GetKeepaliveTime() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveTime + } + return nil +} + +func (x *TcpKeepalive) GetKeepaliveInterval() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveInterval + } + return nil +} + +type ExtraSourceAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The additional address to bind. + Address *SocketAddress `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the BindConfig. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. 
+ SocketOptions *SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *ExtraSourceAddress) Reset() { + *x = ExtraSourceAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtraSourceAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtraSourceAddress) ProtoMessage() {} + +func (x *ExtraSourceAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtraSourceAddress.ProtoReflect.Descriptor instead. +func (*ExtraSourceAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtraSourceAddress) GetAddress() *SocketAddress { + if x != nil { + return x.Address + } + return nil +} + +func (x *ExtraSourceAddress) GetSocketOptions() *SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// [#next-free-field: 7] +type BindConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address to bind to when creating a socket. + SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` + // Whether to set the “IP_FREEBIND“ option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option “IP_FREEBIND“ is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + Freebind *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Extra source addresses appended to the address specified in the “source_address“ + // field. This enables to specify multiple source addresses. + // The source address selection is determined by :ref:`local_address_selector + // `. + ExtraSourceAddresses []*ExtraSourceAddress `protobuf:"bytes,5,rep,name=extra_source_addresses,json=extraSourceAddresses,proto3" json:"extra_source_addresses,omitempty"` + // Deprecated by + // :ref:`extra_source_addresses ` + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/address.proto. + AdditionalSourceAddresses []*SocketAddress `protobuf:"bytes,4,rep,name=additional_source_addresses,json=additionalSourceAddresses,proto3" json:"additional_source_addresses,omitempty"` + // Custom local address selector to override the default (i.e. + // :ref:`DefaultLocalAddressSelector + // `). 
+ // [#extension-category: envoy.upstream.local_address_selector] + LocalAddressSelector *TypedExtensionConfig `protobuf:"bytes,6,opt,name=local_address_selector,json=localAddressSelector,proto3" json:"local_address_selector,omitempty"` +} + +func (x *BindConfig) Reset() { + *x = BindConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BindConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BindConfig) ProtoMessage() {} + +func (x *BindConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BindConfig.ProtoReflect.Descriptor instead. +func (*BindConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5} +} + +func (x *BindConfig) GetSourceAddress() *SocketAddress { + if x != nil { + return x.SourceAddress + } + return nil +} + +func (x *BindConfig) GetFreebind() *wrapperspb.BoolValue { + if x != nil { + return x.Freebind + } + return nil +} + +func (x *BindConfig) GetSocketOptions() []*SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *BindConfig) GetExtraSourceAddresses() []*ExtraSourceAddress { + if x != nil { + return x.ExtraSourceAddresses + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/address.proto. +func (x *BindConfig) GetAdditionalSourceAddresses() []*SocketAddress { + if x != nil { + return x.AdditionalSourceAddresses + } + return nil +} + +func (x *BindConfig) GetLocalAddressSelector() *TypedExtensionConfig { + if x != nil { + return x.LocalAddressSelector + } + return nil +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Address: + // + // *Address_SocketAddress + // *Address_Pipe + // *Address_EnvoyInternalAddress + Address isAddress_Address `protobuf_oneof:"address"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
+func (*Address) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6} +} + +func (m *Address) GetAddress() isAddress_Address { + if m != nil { + return m.Address + } + return nil +} + +func (x *Address) GetSocketAddress() *SocketAddress { + if x, ok := x.GetAddress().(*Address_SocketAddress); ok { + return x.SocketAddress + } + return nil +} + +func (x *Address) GetPipe() *Pipe { + if x, ok := x.GetAddress().(*Address_Pipe); ok { + return x.Pipe + } + return nil +} + +func (x *Address) GetEnvoyInternalAddress() *EnvoyInternalAddress { + if x, ok := x.GetAddress().(*Address_EnvoyInternalAddress); ok { + return x.EnvoyInternalAddress + } + return nil +} + +type isAddress_Address interface { + isAddress_Address() +} + +type Address_SocketAddress struct { + SocketAddress *SocketAddress `protobuf:"bytes,1,opt,name=socket_address,json=socketAddress,proto3,oneof"` +} + +type Address_Pipe struct { + Pipe *Pipe `protobuf:"bytes,2,opt,name=pipe,proto3,oneof"` +} + +type Address_EnvoyInternalAddress struct { + // Specifies a user-space address handled by :ref:`internal listeners + // `. + EnvoyInternalAddress *EnvoyInternalAddress `protobuf:"bytes,3,opt,name=envoy_internal_address,json=envoyInternalAddress,proto3,oneof"` +} + +func (*Address_SocketAddress) isAddress_Address() {} + +func (*Address_Pipe) isAddress_Address() {} + +func (*Address_EnvoyInternalAddress) isAddress_Address() {} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +type CidrRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IPv4 or IPv6 address, e.g. “192.0.0.0“ or “2001:db8::“. + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` +} + +func (x *CidrRange) Reset() { + *x = CidrRange{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CidrRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CidrRange) ProtoMessage() {} + +func (x *CidrRange) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. 
+func (*CidrRange) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{7} +} + +func (x *CidrRange) GetAddressPrefix() string { + if x != nil { + return x.AddressPrefix + } + return "" +} + +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.PrefixLen + } + return nil +} + +var File_envoy_config_core_v3_address_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_address_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x04, 0x50, 0x69, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, 0x03, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x3a, 0x1d, 0x9a, + 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x22, 0x8a, 0x01, 0x0a, + 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x6e, 
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x1d, 0x0a, 0x16, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, + 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, 0x76, 0x34, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, + 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15, 0x0a, 0x0e, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a, 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, + 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x6b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0e, + 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, 
0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x47, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb4, 0x04, 0x0a, 0x0a, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x16, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x52, 0x14, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x70, 0x0a, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x19, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x23, 0x9a, 0xc5, 0x88, + 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4c, 0x0a, 0x0e, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x69, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x70, 0x65, 0x12, 0x62, 0x0a, 0x16, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 
0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x80, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_address_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_address_proto_rawDescData = file_envoy_config_core_v3_address_proto_rawDesc +) + +func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_address_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_address_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_address_proto_rawDescData) + }) + return file_envoy_config_core_v3_address_proto_rawDescData +} + +var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_config_core_v3_address_proto_goTypes = []interface{}{ + (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol + (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe + (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress + (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress + (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive + (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress + (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig + (*Address)(nil), // 7: envoy.config.core.v3.Address + (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 9: google.protobuf.UInt32Value + (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride + (*wrapperspb.BoolValue)(nil), // 11: google.protobuf.BoolValue + (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig +} +var 
file_envoy_config_core_v3_address_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol + 9, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value + 9, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value + 9, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value + 3, // 4: envoy.config.core.v3.ExtraSourceAddress.address:type_name -> envoy.config.core.v3.SocketAddress + 10, // 5: envoy.config.core.v3.ExtraSourceAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 3, // 6: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress + 11, // 7: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue + 12, // 8: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption + 5, // 9: envoy.config.core.v3.BindConfig.extra_source_addresses:type_name -> envoy.config.core.v3.ExtraSourceAddress + 3, // 10: envoy.config.core.v3.BindConfig.additional_source_addresses:type_name -> envoy.config.core.v3.SocketAddress + 13, // 11: envoy.config.core.v3.BindConfig.local_address_selector:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 12: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress + 1, // 13: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe + 2, // 14: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress + 9, // 15: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_address_proto_init() } +func file_envoy_config_core_v3_address_proto_init() { + if File_envoy_config_core_v3_address_proto != nil { + return + } + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_socket_option_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_address_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Pipe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnvoyInternalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TcpKeepalive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[4].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*ExtraSourceAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CidrRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_address_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*EnvoyInternalAddress_ServerListenerName)(nil), + } + file_envoy_config_core_v3_address_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*SocketAddress_PortValue)(nil), + (*SocketAddress_NamedPort)(nil), + } + file_envoy_config_core_v3_address_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Address_SocketAddress)(nil), + (*Address_Pipe)(nil), + (*Address_EnvoyInternalAddress)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_address_proto_rawDesc, + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_address_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_address_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_address_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_address_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_address_proto = out.File + file_envoy_config_core_v3_address_proto_rawDesc = nil + file_envoy_config_core_v3_address_proto_goTypes = nil + file_envoy_config_core_v3_address_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go new file mode 100644 index 000000000..81dea205c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go @@ -0,0 +1,1479 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Pipe with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. 
+func (m *Pipe) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Pipe with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PipeMultiError, or nil if none found. +func (m *Pipe) ValidateAll() error { + return m.validate(true) +} + +func (m *Pipe) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := PipeValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMode() > 511 { + err := PipeValidationError{ + field: "Mode", + reason: "value must be less than or equal to 511", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PipeMultiError(errors) + } + + return nil +} + +// PipeMultiError is an error wrapping multiple validation errors returned by +// Pipe.ValidateAll() if the designated constraints aren't met. +type PipeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PipeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PipeMultiError) AllErrors() []error { return m } + +// PipeValidationError is the validation error returned by Pipe.Validate if the +// designated constraints aren't met. +type PipeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PipeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PipeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PipeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PipeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PipeValidationError) ErrorName() string { return "PipeValidationError" } + +// Error satisfies the builtin error interface +func (e PipeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPipe.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PipeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PipeValidationError{} + +// Validate checks the field values on EnvoyInternalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EnvoyInternalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EnvoyInternalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EnvoyInternalAddressMultiError, or nil if none found. 
+func (m *EnvoyInternalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *EnvoyInternalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EndpointId + + oneofAddressNameSpecifierPresent := false + switch v := m.AddressNameSpecifier.(type) { + case *EnvoyInternalAddress_ServerListenerName: + if v == nil { + err := EnvoyInternalAddressValidationError{ + field: "AddressNameSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressNameSpecifierPresent = true + // no validation rules for ServerListenerName + default: + _ = v // ensures v is used + } + if !oneofAddressNameSpecifierPresent { + err := EnvoyInternalAddressValidationError{ + field: "AddressNameSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return EnvoyInternalAddressMultiError(errors) + } + + return nil +} + +// EnvoyInternalAddressMultiError is an error wrapping multiple validation +// errors returned by EnvoyInternalAddress.ValidateAll() if the designated +// constraints aren't met. +type EnvoyInternalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EnvoyInternalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EnvoyInternalAddressMultiError) AllErrors() []error { return m } + +// EnvoyInternalAddressValidationError is the validation error returned by +// EnvoyInternalAddress.Validate if the designated constraints aren't met. +type EnvoyInternalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EnvoyInternalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EnvoyInternalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EnvoyInternalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EnvoyInternalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EnvoyInternalAddressValidationError) ErrorName() string { + return "EnvoyInternalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e EnvoyInternalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEnvoyInternalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EnvoyInternalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EnvoyInternalAddressValidationError{} + +// Validate checks the field values on SocketAddress with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *SocketAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketAddress with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SocketAddressMultiError, or +// nil if none found. +func (m *SocketAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := SocketAddress_Protocol_name[int32(m.GetProtocol())]; !ok { + err := SocketAddressValidationError{ + field: "Protocol", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetAddress()) < 1 { + err := SocketAddressValidationError{ + field: "Address", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ResolverName + + // no validation rules for Ipv4Compat + + oneofPortSpecifierPresent := false + switch v := m.PortSpecifier.(type) { + case *SocketAddress_PortValue: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true + + if m.GetPortValue() > 65535 { + err := SocketAddressValidationError{ + field: "PortValue", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *SocketAddress_NamedPort: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true + // no validation rules for NamedPort + default: + _ = v // ensures v is used + } + if !oneofPortSpecifierPresent { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SocketAddressMultiError(errors) + } + + return nil +} + +// SocketAddressMultiError is an error wrapping multiple validation errors +// returned by SocketAddress.ValidateAll() if the designated constraints +// aren't met. +type SocketAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketAddressMultiError) AllErrors() []error { return m } + +// SocketAddressValidationError is the validation error returned by +// SocketAddress.Validate if the designated constraints aren't met. +type SocketAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SocketAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketAddressValidationError) ErrorName() string { return "SocketAddressValidationError" } + +// Error satisfies the builtin error interface +func (e SocketAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketAddressValidationError{} + +// Validate checks the field values on TcpKeepalive with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TcpKeepalive) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TcpKeepalive with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TcpKeepaliveMultiError, or +// nil if none found. +func (m *TcpKeepalive) ValidateAll() error { + return m.validate(true) +} + +func (m *TcpKeepalive) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetKeepaliveProbes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveProbes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeepaliveTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeepaliveInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TcpKeepaliveMultiError(errors) + } + + return nil +} + +// TcpKeepaliveMultiError is an error wrapping multiple validation errors +// returned by TcpKeepalive.ValidateAll() if the designated constraints aren't met. +type TcpKeepaliveMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TcpKeepaliveMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TcpKeepaliveMultiError) AllErrors() []error { return m } + +// TcpKeepaliveValidationError is the validation error returned by +// TcpKeepalive.Validate if the designated constraints aren't met. +type TcpKeepaliveValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TcpKeepaliveValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TcpKeepaliveValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TcpKeepaliveValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TcpKeepaliveValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TcpKeepaliveValidationError) ErrorName() string { return "TcpKeepaliveValidationError" } + +// Error satisfies the builtin error interface +func (e TcpKeepaliveValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTcpKeepalive.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TcpKeepaliveValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TcpKeepaliveValidationError{} + +// Validate checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExtraSourceAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtraSourceAddressMultiError, or nil if none found. 
+func (m *ExtraSourceAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtraSourceAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetAddress() == nil { + err := ExtraSourceAddressValidationError{ + field: "Address", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ExtraSourceAddressMultiError(errors) + } + + return nil +} + +// ExtraSourceAddressMultiError is an error wrapping multiple validation errors +// returned by ExtraSourceAddress.ValidateAll() if the designated constraints +// aren't met. +type ExtraSourceAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtraSourceAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtraSourceAddressMultiError) AllErrors() []error { return m } + +// ExtraSourceAddressValidationError is the validation error returned by +// ExtraSourceAddress.Validate if the designated constraints aren't met. +type ExtraSourceAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtraSourceAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtraSourceAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtraSourceAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtraSourceAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ExtraSourceAddressValidationError) ErrorName() string { + return "ExtraSourceAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e ExtraSourceAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtraSourceAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtraSourceAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtraSourceAddressValidationError{} + +// Validate checks the field values on BindConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *BindConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BindConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BindConfigMultiError, or +// nil if none found. +func (m *BindConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *BindConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSourceAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFreebind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetExtraSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetAdditionalSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLocalAddressSelector()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalAddressSelector()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BindConfigMultiError(errors) + } + + return nil +} + +// BindConfigMultiError is an error wrapping multiple validation errors +// returned by BindConfig.ValidateAll() if the designated constraints aren't met. +type BindConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m BindConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BindConfigMultiError) AllErrors() []error { return m } + +// BindConfigValidationError is the validation error returned by +// BindConfig.Validate if the designated constraints aren't met. +type BindConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BindConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BindConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BindConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BindConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BindConfigValidationError) ErrorName() string { return "BindConfigValidationError" } + +// Error satisfies the builtin error interface +func (e BindConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBindConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BindConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BindConfigValidationError{} + +// Validate checks the field values on Address with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Address) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Address with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in AddressMultiError, or nil if none found. 
+func (m *Address) ValidateAll() error { + return m.validate(true) +} + +func (m *Address) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofAddressPresent := false + switch v := m.Address.(type) { + case *Address_SocketAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetSocketAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Address_Pipe: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetPipe()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPipe()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Address_EnvoyInternalAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetEnvoyInternalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnvoyInternalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofAddressPresent { + err := AddressValidationError{ + field: "Address", + reason: "value is required", + } + if !all { + return err + } + errors = 
append(errors, err) + } + + if len(errors) > 0 { + return AddressMultiError(errors) + } + + return nil +} + +// AddressMultiError is an error wrapping multiple validation errors returned +// by Address.ValidateAll() if the designated constraints aren't met. +type AddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AddressMultiError) AllErrors() []error { return m } + +// AddressValidationError is the validation error returned by Address.Validate +// if the designated constraints aren't met. +type AddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AddressValidationError) ErrorName() string { return "AddressValidationError" } + +// Error satisfies the builtin error interface +func (e AddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AddressValidationError{} + +// Validate checks the field values on CidrRange with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CidrRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CidrRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CidrRangeMultiError, or nil +// if none found. +func (m *CidrRange) ValidateAll() error { + return m.validate(true) +} + +func (m *CidrRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 { + err := CidrRangeValidationError{ + field: "AddressPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetPrefixLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := CidrRangeValidationError{ + field: "PrefixLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CidrRangeMultiError(errors) + } + + return nil +} + +// CidrRangeMultiError is an error wrapping multiple validation errors returned +// by CidrRange.ValidateAll() if the designated constraints aren't met. +type CidrRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m CidrRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CidrRangeMultiError) AllErrors() []error { return m } + +// CidrRangeValidationError is the validation error returned by +// CidrRange.Validate if the designated constraints aren't met. +type CidrRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CidrRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CidrRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CidrRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CidrRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" } + +// Error satisfies the builtin error interface +func (e CidrRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCidrRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CidrRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CidrRangeValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go new file mode 100644 index 000000000..cf1777901 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go @@ -0,0 +1,859 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Pipe) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EndpointId) > 0 { + i -= len(m.EndpointId) + copy(dAtA[i:], m.EndpointId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointId))) + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.AddressNameSpecifier.(*EnvoyInternalAddress_ServerListenerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ServerListenerName) + copy(dAtA[i:], m.ServerListenerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerListenerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SocketAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Ipv4Compat { + i-- + if m.Ipv4Compat { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ResolverName) > 0 { + i -= len(m.ResolverName) + copy(dAtA[i:], m.ResolverName) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResolverName))) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.PortSpecifier.(*SocketAddress_NamedPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PortSpecifier.(*SocketAddress_PortValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.Protocol != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Protocol)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SocketAddress_PortValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_PortValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *SocketAddress_NamedPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_NamedPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.NamedPort) + copy(dAtA[i:], m.NamedPort) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NamedPort))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *TcpKeepalive) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpKeepalive) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpKeepalive) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepaliveInterval != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.KeepaliveTime != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.KeepaliveProbes != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveProbes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtraSourceAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtraSourceAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*ExtraSourceAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SocketOptions != nil { + size, err := m.SocketOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + size, err := m.Address.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BindConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BindConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BindConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalAddressSelector != nil { + size, err := m.LocalAddressSelector.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.ExtraSourceAddresses) > 0 { + for iNdEx := len(m.ExtraSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ExtraSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for iNdEx := len(m.AdditionalSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.SourceAddress != nil { + size, err := m.SourceAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Address) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Address.(*Address_EnvoyInternalAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_Pipe); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_SocketAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Address_SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SocketAddress != nil { + size, err := m.SocketAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Address_Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Pipe != nil { + size, err := m.Pipe.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Address_EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyInternalAddress != nil { + size, err := m.EnvoyInternalAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CidrRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CidrRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CidrRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrefixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.PrefixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + 
i-- + dAtA[i] = 0x12 + } + if len(m.AddressPrefix) > 0 { + i -= len(m.AddressPrefix) + copy(dAtA[i:], m.AddressPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.AddressNameSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.EndpointId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress_ServerListenerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServerListenerName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Protocol)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PortSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ResolverName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ipv4Compat { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SocketAddress_PortValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + return n +} +func (m *SocketAddress_NamedPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamedPort) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TcpKeepalive) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeepaliveProbes != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveProbes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveTime != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveInterval != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExtraSourceAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + l = m.Address.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + l = m.SocketOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BindConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceAddress != nil { + l = m.SourceAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for _, e := range m.AdditionalSourceAddresses { + l = e.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExtraSourceAddresses) > 0 { + for _, e := range m.ExtraSourceAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalAddressSelector != nil { + l = m.LocalAddressSelector.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Address.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Address_SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SocketAddress != nil { + l = m.SocketAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pipe != nil { + l = m.Pipe.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyInternalAddress != nil { + l = m.EnvoyInternalAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CidrRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AddressPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrefixLen != nil { + l = (*wrapperspb.UInt32Value)(m.PrefixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go new file mode 100644 index 000000000..68841ad15 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go @@ -0,0 +1,193 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration defining a jittered exponential back off strategy. +type BackoffStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The base interval to be used for the next back off computation. It should + // be greater than zero and less than or equal to :ref:`max_interval + // `. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between retries. This parameter is optional, + // but must be greater than or equal to the :ref:`base_interval + // ` if set. 
The default + // is 10 times the :ref:`base_interval + // `. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *BackoffStrategy) Reset() { + *x = BackoffStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackoffStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackoffStrategy) ProtoMessage() {} + +func (x *BackoffStrategy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackoffStrategy.ProtoReflect.Descriptor instead. +func (*BackoffStrategy) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_backoff_proto_rawDescGZIP(), []int{0} +} + +func (x *BackoffStrategy) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *BackoffStrategy) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +var File_envoy_config_core_v3_backoff_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_backoff_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, + 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, + 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 
0x42, 0x05, 0xaa, + 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x80, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_backoff_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_backoff_proto_rawDescData = file_envoy_config_core_v3_backoff_proto_rawDesc +) + +func file_envoy_config_core_v3_backoff_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_backoff_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_backoff_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_backoff_proto_rawDescData) + }) + return file_envoy_config_core_v3_backoff_proto_rawDescData +} + +var file_envoy_config_core_v3_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_backoff_proto_goTypes = []interface{}{ + (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_config_core_v3_backoff_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration + 1, // 1: envoy.config.core.v3.BackoffStrategy.max_interval:type_name -> google.protobuf.Duration + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_backoff_proto_init() } +func file_envoy_config_core_v3_backoff_proto_init() { + if File_envoy_config_core_v3_backoff_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_backoff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackoffStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_backoff_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_backoff_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_backoff_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_backoff_proto_msgTypes, + 
}.Build() + File_envoy_config_core_v3_backoff_proto = out.File + file_envoy_config_core_v3_backoff_proto_rawDesc = nil + file_envoy_config_core_v3_backoff_proto_goTypes = nil + file_envoy_config_core_v3_backoff_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go new file mode 100644 index 000000000..6c9df7628 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on BackoffStrategy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *BackoffStrategy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BackoffStrategy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BackoffStrategyMultiError, or nil if none found. 
+func (m *BackoffStrategy) ValidateAll() error { + return m.validate(true) +} + +func (m *BackoffStrategy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BackoffStrategyValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := BackoffStrategyValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return BackoffStrategyMultiError(errors) + } + + return nil +} + +// BackoffStrategyMultiError is an error wrapping multiple validation errors +// returned by BackoffStrategy.ValidateAll() if the designated constraints +// aren't met. +type BackoffStrategyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BackoffStrategyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BackoffStrategyMultiError) AllErrors() []error { return m } + +// BackoffStrategyValidationError is the validation error returned by +// BackoffStrategy.Validate if the designated constraints aren't met. +type BackoffStrategyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BackoffStrategyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BackoffStrategyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BackoffStrategyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BackoffStrategyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BackoffStrategyValidationError) ErrorName() string { return "BackoffStrategyValidationError" } + +// Error satisfies the builtin error interface +func (e BackoffStrategyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBackoffStrategy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BackoffStrategyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BackoffStrategyValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go new file mode 100644 index 000000000..3c66ff712 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *BackoffStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackoffStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BackoffStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackoffStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go new file mode 100644 index 000000000..862e09a83 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go @@ -0,0 +1,3242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +type RoutingPriority int32 + +const ( + RoutingPriority_DEFAULT RoutingPriority = 0 + RoutingPriority_HIGH RoutingPriority = 1 +) + +// Enum value maps for RoutingPriority. +var ( + RoutingPriority_name = map[int32]string{ + 0: "DEFAULT", + 1: "HIGH", + } + RoutingPriority_value = map[string]int32{ + "DEFAULT": 0, + "HIGH": 1, + } +) + +func (x RoutingPriority) Enum() *RoutingPriority { + p := new(RoutingPriority) + *p = x + return p +} + +func (x RoutingPriority) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RoutingPriority) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[0].Descriptor() +} + +func (RoutingPriority) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[0] +} + +func (x RoutingPriority) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RoutingPriority.Descriptor instead. +func (RoutingPriority) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0} +} + +// HTTP request method. +type RequestMethod int32 + +const ( + RequestMethod_METHOD_UNSPECIFIED RequestMethod = 0 + RequestMethod_GET RequestMethod = 1 + RequestMethod_HEAD RequestMethod = 2 + RequestMethod_POST RequestMethod = 3 + RequestMethod_PUT RequestMethod = 4 + RequestMethod_DELETE RequestMethod = 5 + RequestMethod_CONNECT RequestMethod = 6 + RequestMethod_OPTIONS RequestMethod = 7 + RequestMethod_TRACE RequestMethod = 8 + RequestMethod_PATCH RequestMethod = 9 +) + +// Enum value maps for RequestMethod. 
+var ( + RequestMethod_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "GET", + 2: "HEAD", + 3: "POST", + 4: "PUT", + 5: "DELETE", + 6: "CONNECT", + 7: "OPTIONS", + 8: "TRACE", + 9: "PATCH", + } + RequestMethod_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "GET": 1, + "HEAD": 2, + "POST": 3, + "PUT": 4, + "DELETE": 5, + "CONNECT": 6, + "OPTIONS": 7, + "TRACE": 8, + "PATCH": 9, + } +) + +func (x RequestMethod) Enum() *RequestMethod { + p := new(RequestMethod) + *p = x + return p +} + +func (x RequestMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RequestMethod) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[1].Descriptor() +} + +func (RequestMethod) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[1] +} + +func (x RequestMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RequestMethod.Descriptor instead. +func (RequestMethod) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1} +} + +// Identifies the direction of the traffic relative to the local Envoy. +type TrafficDirection int32 + +const ( + // Default option is unspecified. + TrafficDirection_UNSPECIFIED TrafficDirection = 0 + // The transport is used for incoming traffic. + TrafficDirection_INBOUND TrafficDirection = 1 + // The transport is used for outgoing traffic. + TrafficDirection_OUTBOUND TrafficDirection = 2 +) + +// Enum value maps for TrafficDirection. +var ( + TrafficDirection_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "INBOUND", + 2: "OUTBOUND", + } + TrafficDirection_value = map[string]int32{ + "UNSPECIFIED": 0, + "INBOUND": 1, + "OUTBOUND": 2, + } +) + +func (x TrafficDirection) Enum() *TrafficDirection { + p := new(TrafficDirection) + *p = x + return p +} + +func (x TrafficDirection) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[2].Descriptor() +} + +func (TrafficDirection) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[2] +} + +func (x TrafficDirection) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TrafficDirection.Descriptor instead. +func (TrafficDirection) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} +} + +// Describes the supported actions types for key/value pair append action. +type KeyValueAppend_KeyValueAppendAction int32 + +const ( + // If the key already exists, this action will result in the following behavior: + // + // - Comma-concatenated value if multiple values are not allowed. + // - New value added to the list of values if multiple values are allowed. + // + // If the key doesn't exist then this will add pair with specified key and value. + KeyValueAppend_APPEND_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 0 + // This action will add the key/value pair if it doesn't already exist. If the + // key already exists then this will be a no-op. + KeyValueAppend_ADD_IF_ABSENT KeyValueAppend_KeyValueAppendAction = 1 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. 
If the key doesn't exist then this will add + // the pair with specified key and value. + KeyValueAppend_OVERWRITE_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 2 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will + // be no-op. + KeyValueAppend_OVERWRITE_IF_EXISTS KeyValueAppend_KeyValueAppendAction = 3 +) + +// Enum value maps for KeyValueAppend_KeyValueAppendAction. +var ( + KeyValueAppend_KeyValueAppendAction_name = map[int32]string{ + 0: "APPEND_IF_EXISTS_OR_ADD", + 1: "ADD_IF_ABSENT", + 2: "OVERWRITE_IF_EXISTS_OR_ADD", + 3: "OVERWRITE_IF_EXISTS", + } + KeyValueAppend_KeyValueAppendAction_value = map[string]int32{ + "APPEND_IF_EXISTS_OR_ADD": 0, + "ADD_IF_ABSENT": 1, + "OVERWRITE_IF_EXISTS_OR_ADD": 2, + "OVERWRITE_IF_EXISTS": 3, + } +) + +func (x KeyValueAppend_KeyValueAppendAction) Enum() *KeyValueAppend_KeyValueAppendAction { + p := new(KeyValueAppend_KeyValueAppendAction) + *p = x + return p +} + +func (x KeyValueAppend_KeyValueAppendAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyValueAppend_KeyValueAppendAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[3].Descriptor() +} + +func (KeyValueAppend_KeyValueAppendAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[3] +} + +func (x KeyValueAppend_KeyValueAppendAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use KeyValueAppend_KeyValueAppendAction.Descriptor instead. +func (KeyValueAppend_KeyValueAppendAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10, 0} +} + +// Describes the supported actions types for header append action. +type HeaderValueOption_HeaderAppendAction int32 + +const ( + // If the header already exists, this action will result in: + // + // - Comma-concatenated for predefined inline headers. + // - Duplicate header added in the “HeaderMap“ for other headers. + // + // If the header doesn't exist then this will add new header with specified key and value. + HeaderValueOption_APPEND_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 0 + // This action will add the header if it doesn't already exist. If the header + // already exists then this will be a no-op. + HeaderValueOption_ADD_IF_ABSENT HeaderValueOption_HeaderAppendAction = 1 + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will add the header + // with specified key and value. + HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 2 + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will be no-op. + HeaderValueOption_OVERWRITE_IF_EXISTS HeaderValueOption_HeaderAppendAction = 3 +) + +// Enum value maps for HeaderValueOption_HeaderAppendAction. 
+var ( + HeaderValueOption_HeaderAppendAction_name = map[int32]string{ + 0: "APPEND_IF_EXISTS_OR_ADD", + 1: "ADD_IF_ABSENT", + 2: "OVERWRITE_IF_EXISTS_OR_ADD", + 3: "OVERWRITE_IF_EXISTS", + } + HeaderValueOption_HeaderAppendAction_value = map[string]int32{ + "APPEND_IF_EXISTS_OR_ADD": 0, + "ADD_IF_ABSENT": 1, + "OVERWRITE_IF_EXISTS_OR_ADD": 2, + "OVERWRITE_IF_EXISTS": 3, + } +) + +func (x HeaderValueOption_HeaderAppendAction) Enum() *HeaderValueOption_HeaderAppendAction { + p := new(HeaderValueOption_HeaderAppendAction) + *p = x + return p +} + +func (x HeaderValueOption_HeaderAppendAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderValueOption_HeaderAppendAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[4].Descriptor() +} + +func (HeaderValueOption_HeaderAppendAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[4] +} + +func (x HeaderValueOption_HeaderAppendAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderValueOption_HeaderAppendAction.Descriptor instead. +func (HeaderValueOption_HeaderAppendAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14, 0} +} + +// Identifies location of where either Envoy runs or where upstream hosts run. +type Locality struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Region this :ref:`zone ` belongs to. + Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"` + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + SubZone string `protobuf:"bytes,3,opt,name=sub_zone,json=subZone,proto3" json:"sub_zone,omitempty"` +} + +func (x *Locality) Reset() { + *x = Locality{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Locality) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Locality) ProtoMessage() {} + +func (x *Locality) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Locality.ProtoReflect.Descriptor instead. 
+func (*Locality) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0} +} + +func (x *Locality) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + +func (x *Locality) GetZone() string { + if x != nil { + return x.Zone + } + return "" +} + +func (x *Locality) GetSubZone() string { + if x != nil { + return x.SubZone + } + return "" +} + +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. +type BuildVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SemVer version of extension. + Version *v3.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Free-form build information. + // Envoy defines several well known keys in the source/common/version/version.h file + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *BuildVersion) Reset() { + *x = BuildVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BuildVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BuildVersion) ProtoMessage() {} + +func (x *BuildVersion) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BuildVersion.ProtoReflect.Descriptor instead. +func (*BuildVersion) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1} +} + +func (x *BuildVersion) GetVersion() *v3.SemanticVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *BuildVersion) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +// Version and identification for an Envoy extension. +// [#next-free-field: 7] +type Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. envoy.filters.http.router, com.acme.widget. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + Category string `protobuf:"bytes,2,opt,name=category,proto3" json:"category,omitempty"` + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. 
+ TypeDescriptor string `protobuf:"bytes,3,opt,name=type_descriptor,json=typeDescriptor,proto3" json:"type_descriptor,omitempty"` + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + Version *BuildVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Indicates that the extension is present but was disabled via dynamic configuration. + Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` + // Type URLs of extension configuration protos. + TypeUrls []string `protobuf:"bytes,6,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"` +} + +func (x *Extension) Reset() { + *x = Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Extension) ProtoMessage() {} + +func (x *Extension) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Extension.ProtoReflect.Descriptor instead. +func (*Extension) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} +} + +func (x *Extension) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Extension) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *Extension) GetTypeDescriptor() string { + if x != nil { + return x.TypeDescriptor + } + return "" +} + +func (x *Extension) GetVersion() *BuildVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *Extension) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +func (x *Extension) GetTypeUrls() []string { + if x != nil { + return x.TypeUrls + } + return nil +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +// [#next-free-field: 13] +type Node struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification + // `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. 
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + Metadata *structpb.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike + // other fields in this message). For example, the xDS client may have a shard identifier that + // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the + // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic + // parameter then appears in this field during future discovery requests. + DynamicParameters map[string]*v31.ContextParams `protobuf:"bytes,12,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Locality specifying where the Envoy instance is running. + Locality *Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"` + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + UserAgentName string `protobuf:"bytes,6,opt,name=user_agent_name,json=userAgentName,proto3" json:"user_agent_name,omitempty"` + // Types that are assignable to UserAgentVersionType: + // + // *Node_UserAgentVersion + // *Node_UserAgentBuildVersion + UserAgentVersionType isNode_UserAgentVersionType `protobuf_oneof:"user_agent_version_type"` + // List of extensions and their versions supported by the node. + Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"` + // Client feature support list. These are well known features described + // in the Envoy API repository for a given major version of an API. Client features + // use reverse DNS naming scheme, for example “com.acme.feature“. + // See :ref:`the list of features ` that xDS client may + // support. + ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"` + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress “(0.0.0.0,80)“. The field is optional and just a hint. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. + ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"` +} + +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
+func (*Node) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{3} +} + +func (x *Node) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Node) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *Node) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Node) GetDynamicParameters() map[string]*v31.ContextParams { + if x != nil { + return x.DynamicParameters + } + return nil +} + +func (x *Node) GetLocality() *Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *Node) GetUserAgentName() string { + if x != nil { + return x.UserAgentName + } + return "" +} + +func (m *Node) GetUserAgentVersionType() isNode_UserAgentVersionType { + if m != nil { + return m.UserAgentVersionType + } + return nil +} + +func (x *Node) GetUserAgentVersion() string { + if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentVersion); ok { + return x.UserAgentVersion + } + return "" +} + +func (x *Node) GetUserAgentBuildVersion() *BuildVersion { + if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentBuildVersion); ok { + return x.UserAgentBuildVersion + } + return nil +} + +func (x *Node) GetExtensions() []*Extension { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Node) GetClientFeatures() []string { + if x != nil { + return x.ClientFeatures + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *Node) GetListeningAddresses() []*Address { + if x != nil { + return x.ListeningAddresses + } + return nil +} + +type isNode_UserAgentVersionType interface { + isNode_UserAgentVersionType() +} + +type Node_UserAgentVersion struct { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + UserAgentVersion string `protobuf:"bytes,7,opt,name=user_agent_version,json=userAgentVersion,proto3,oneof"` +} + +type Node_UserAgentBuildVersion struct { + // Structured version of the entity requesting config. + UserAgentBuildVersion *BuildVersion `protobuf:"bytes,8,opt,name=user_agent_build_version,json=userAgentBuildVersion,proto3,oneof"` +} + +func (*Node_UserAgentVersion) isNode_UserAgentVersionType() {} + +func (*Node_UserAgentBuildVersion) isNode_UserAgentVersionType() {} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map, usually from +// filter name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// Another example use of metadata is to per service config info in cluster metadata, which may get +// consumed by multiple filters. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. +// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. There are some well defined metadata used today for +// this purpose: +// +// - “{"envoy.lb": {"canary": }}“ This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. 
+// +// [#next-major-version: move to type/metadata/v2] +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“ + // namespace is reserved for Envoy's built-in filters. + // If both “filter_metadata“ and + // :ref:`typed_filter_metadata ` + // fields are present in the metadata with same keys, + // only “typed_filter_metadata“ field will be parsed. + FilterMetadata map[string]*structpb.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“ + // namespace is reserved for Envoy's built-in filters. + // The value is encoded as google.protobuf.Any. + // If both :ref:`filter_metadata ` + // and “typed_filter_metadata“ fields are present in the metadata with same keys, + // only “typed_filter_metadata“ field will be parsed. + TypedFilterMetadata map[string]*anypb.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{4} +} + +func (x *Metadata) GetFilterMetadata() map[string]*structpb.Struct { + if x != nil { + return x.FilterMetadata + } + return nil +} + +func (x *Metadata) GetTypedFilterMetadata() map[string]*anypb.Any { + if x != nil { + return x.TypedFilterMetadata + } + return nil +} + +// Runtime derived uint32 with a default when not specified. +type RuntimeUInt32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue uint32 `protobuf:"varint,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. 
+ RuntimeKey string `protobuf:"bytes,3,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeUInt32) Reset() { + *x = RuntimeUInt32{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeUInt32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeUInt32) ProtoMessage() {} + +func (x *RuntimeUInt32) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeUInt32.ProtoReflect.Descriptor instead. +func (*RuntimeUInt32) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{5} +} + +func (x *RuntimeUInt32) GetDefaultValue() uint32 { + if x != nil { + return x.DefaultValue + } + return 0 +} + +func (x *RuntimeUInt32) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived percentage with a default when not specified. +type RuntimePercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue *v3.Percent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. + RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimePercent) Reset() { + *x = RuntimePercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimePercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimePercent) ProtoMessage() {} + +func (x *RuntimePercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimePercent.ProtoReflect.Descriptor instead. +func (*RuntimePercent) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{6} +} + +func (x *RuntimePercent) GetDefaultValue() *v3.Percent { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimePercent) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived double with a default when not specified. +type RuntimeDouble struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue float64 `protobuf:"fixed64,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. 
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeDouble) Reset() { + *x = RuntimeDouble{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeDouble) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeDouble) ProtoMessage() {} + +func (x *RuntimeDouble) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeDouble.ProtoReflect.Descriptor instead. +func (*RuntimeDouble) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeDouble) GetDefaultValue() float64 { + if x != nil { + return x.DefaultValue + } + return 0 +} + +func (x *RuntimeDouble) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived bool with a default when not specified. +type RuntimeFeatureFlag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. The boolean value must + // be represented via its + // `canonical JSON encoding `_. + RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeFeatureFlag) Reset() { + *x = RuntimeFeatureFlag{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFeatureFlag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFeatureFlag) ProtoMessage() {} + +func (x *RuntimeFeatureFlag) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFeatureFlag.ProtoReflect.Descriptor instead. +func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{8} +} + +func (x *RuntimeFeatureFlag) GetDefaultValue() *wrapperspb.BoolValue { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimeFeatureFlag) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key of the key/value pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the key/value pair. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Key/value pair plus option to control append behavior. This is used to specify +// key/value pairs that should be appended to a set of existing key/value pairs. +type KeyValueAppend struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key/value pair entry that this option to append or overwrite. + Entry *KeyValue `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` + // Describes the action taken to append/overwrite the given value for an existing + // key or to only add this key if it's absent. + Action KeyValueAppend_KeyValueAppendAction `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.core.v3.KeyValueAppend_KeyValueAppendAction" json:"action,omitempty"` +} + +func (x *KeyValueAppend) Reset() { + *x = KeyValueAppend{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueAppend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueAppend) ProtoMessage() {} + +func (x *KeyValueAppend) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueAppend.ProtoReflect.Descriptor instead. +func (*KeyValueAppend) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10} +} + +func (x *KeyValueAppend) GetEntry() *KeyValue { + if x != nil { + return x.Entry + } + return nil +} + +func (x *KeyValueAppend) GetAction() KeyValueAppend_KeyValueAppendAction { + if x != nil { + return x.Action + } + return KeyValueAppend_APPEND_IF_EXISTS_OR_ADD +} + +// Key/value pair to append or remove. +type KeyValueMutation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key/value pair to append or overwrite. Only one of “append“ or “remove“ can be set. + Append *KeyValueAppend `protobuf:"bytes,1,opt,name=append,proto3" json:"append,omitempty"` + // Key to remove. Only one of “append“ or “remove“ can be set. 
+ Remove string `protobuf:"bytes,2,opt,name=remove,proto3" json:"remove,omitempty"` +} + +func (x *KeyValueMutation) Reset() { + *x = KeyValueMutation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueMutation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueMutation) ProtoMessage() {} + +func (x *KeyValueMutation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueMutation.ProtoReflect.Descriptor instead. +func (*KeyValueMutation) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11} +} + +func (x *KeyValueMutation) GetAppend() *KeyValueAppend { + if x != nil { + return x.Append + } + return nil +} + +func (x *KeyValueMutation) GetRemove() string { + if x != nil { + return x.Remove + } + return "" +} + +// Query parameter name/value pair. +type QueryParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key of the query parameter. Case sensitive. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the query parameter. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *QueryParameter) Reset() { + *x = QueryParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameter) ProtoMessage() {} + +func (x *QueryParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameter.ProtoReflect.Descriptor instead. +func (*QueryParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12} +} + +func (x *QueryParameter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *QueryParameter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Header name/value pair. +type HeaderValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of “-“. + // Header value is encoded as string. This does not work for non-utf8 characters. + // Only one of “value“ or “raw_value“ can be set. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Header value is encoded as bytes which can support non-utf8 characters. 
+ // Only one of “value“ or “raw_value“ can be set. + RawValue []byte `protobuf:"bytes,3,opt,name=raw_value,json=rawValue,proto3" json:"raw_value,omitempty"` +} + +func (x *HeaderValue) Reset() { + *x = HeaderValue{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValue) ProtoMessage() {} + +func (x *HeaderValue) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. +func (*HeaderValue) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13} +} + +func (x *HeaderValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *HeaderValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *HeaderValue) GetRawValue() []byte { + if x != nil { + return x.RawValue + } + return nil +} + +// Header name/value pair plus option to control append behavior. +type HeaderValueOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name/value pair that this option applies to. + Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // Should the value be appended? If true (default), the value is appended to + // existing values. Otherwise it replaces any existing values. + // This field is deprecated and please use + // :ref:`append_action ` as replacement. + // + // .. note:: + // + // The :ref:`external authorization service ` and + // :ref:`external processor service ` have + // default value (``false``) for this field. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. + Append *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` + // Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. + // Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + // `. + AppendAction HeaderValueOption_HeaderAppendAction `protobuf:"varint,3,opt,name=append_action,json=appendAction,proto3,enum=envoy.config.core.v3.HeaderValueOption_HeaderAppendAction" json:"append_action,omitempty"` + // Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, + // otherwise they are added. 
+ KeepEmptyValue bool `protobuf:"varint,4,opt,name=keep_empty_value,json=keepEmptyValue,proto3" json:"keep_empty_value,omitempty"` +} + +func (x *HeaderValueOption) Reset() { + *x = HeaderValueOption{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValueOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValueOption) ProtoMessage() {} + +func (x *HeaderValueOption) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead. +func (*HeaderValueOption) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14} +} + +func (x *HeaderValueOption) GetHeader() *HeaderValue { + if x != nil { + return x.Header + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *HeaderValueOption) GetAppend() *wrapperspb.BoolValue { + if x != nil { + return x.Append + } + return nil +} + +func (x *HeaderValueOption) GetAppendAction() HeaderValueOption_HeaderAppendAction { + if x != nil { + return x.AppendAction + } + return HeaderValueOption_APPEND_IF_EXISTS_OR_ADD +} + +func (x *HeaderValueOption) GetKeepEmptyValue() bool { + if x != nil { + return x.KeepEmptyValue + } + return false +} + +// Wrapper for a set of headers. +type HeaderMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []*HeaderValue `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HeaderMap) Reset() { + *x = HeaderMap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderMap) ProtoMessage() {} + +func (x *HeaderMap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead. +func (*HeaderMap) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15} +} + +func (x *HeaderMap) GetHeaders() []*HeaderValue { + if x != nil { + return x.Headers + } + return nil +} + +// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename +// events inside this directory trigger the watch. +type WatchedDirectory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Directory path to watch. 
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *WatchedDirectory) Reset() { + *x = WatchedDirectory{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchedDirectory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchedDirectory) ProtoMessage() {} + +func (x *WatchedDirectory) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchedDirectory.ProtoReflect.Descriptor instead. +func (*WatchedDirectory) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16} +} + +func (x *WatchedDirectory) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +// Data source consisting of a file, an inline value, or an environment variable. +// [#next-free-field: 6] +type DataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Specifier: + // + // *DataSource_Filename + // *DataSource_InlineBytes + // *DataSource_InlineString + // *DataSource_EnvironmentVariable + Specifier isDataSource_Specifier `protobuf_oneof:"specifier"` + // Watched directory that is watched for file changes. If this is set explicitly, the file + // specified in the “filename“ field will be reloaded when relevant file move events occur. + // + // .. note:: + // + // This field only makes sense when the ``filename`` field is set. + // + // .. note:: + // + // Envoy only updates when the file is replaced by a file move, and not when the file is + // edited in place. + // + // .. note:: + // + // Not all use cases of ``DataSource`` support watching directories. It depends on the + // specific usage of the ``DataSource``. See the documentation of the parent message for + // details. + WatchedDirectory *WatchedDirectory `protobuf:"bytes,5,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` +} + +func (x *DataSource) Reset() { + *x = DataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataSource) ProtoMessage() {} + +func (x *DataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataSource.ProtoReflect.Descriptor instead. 
+func (*DataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17} +} + +func (m *DataSource) GetSpecifier() isDataSource_Specifier { + if m != nil { + return m.Specifier + } + return nil +} + +func (x *DataSource) GetFilename() string { + if x, ok := x.GetSpecifier().(*DataSource_Filename); ok { + return x.Filename + } + return "" +} + +func (x *DataSource) GetInlineBytes() []byte { + if x, ok := x.GetSpecifier().(*DataSource_InlineBytes); ok { + return x.InlineBytes + } + return nil +} + +func (x *DataSource) GetInlineString() string { + if x, ok := x.GetSpecifier().(*DataSource_InlineString); ok { + return x.InlineString + } + return "" +} + +func (x *DataSource) GetEnvironmentVariable() string { + if x, ok := x.GetSpecifier().(*DataSource_EnvironmentVariable); ok { + return x.EnvironmentVariable + } + return "" +} + +func (x *DataSource) GetWatchedDirectory() *WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +type isDataSource_Specifier interface { + isDataSource_Specifier() +} + +type DataSource_Filename struct { + // Local filesystem data source. + Filename string `protobuf:"bytes,1,opt,name=filename,proto3,oneof"` +} + +type DataSource_InlineBytes struct { + // Bytes inlined in the configuration. + InlineBytes []byte `protobuf:"bytes,2,opt,name=inline_bytes,json=inlineBytes,proto3,oneof"` +} + +type DataSource_InlineString struct { + // String inlined in the configuration. + InlineString string `protobuf:"bytes,3,opt,name=inline_string,json=inlineString,proto3,oneof"` +} + +type DataSource_EnvironmentVariable struct { + // Environment variable data source. + EnvironmentVariable string `protobuf:"bytes,4,opt,name=environment_variable,json=environmentVariable,proto3,oneof"` +} + +func (*DataSource_Filename) isDataSource_Specifier() {} + +func (*DataSource_InlineBytes) isDataSource_Specifier() {} + +func (*DataSource_InlineString) isDataSource_Specifier() {} + +func (*DataSource_EnvironmentVariable) isDataSource_Specifier() {} + +// The message specifies the retry policy of remote data source when fetching fails. +// [#next-free-field: 7] +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies parameters that control :ref:`retry backoff strategy `. + // This parameter is optional, in which case the default base interval is 1000 milliseconds. The + // default maximum interval is 10 times the base interval. + RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + // For details, see :ref:`retry_on `. + RetryOn string `protobuf:"bytes,3,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + // For details, see :ref:`retry_priority `. + RetryPriority *RetryPolicy_RetryPriority `protobuf:"bytes,4,opt,name=retry_priority,json=retryPriority,proto3" json:"retry_priority,omitempty"` + // For details, see :ref:`RetryHostPredicate `. + RetryHostPredicate []*RetryPolicy_RetryHostPredicate `protobuf:"bytes,5,rep,name=retry_host_predicate,json=retryHostPredicate,proto3" json:"retry_host_predicate,omitempty"` + // For details, see :ref:`host_selection_retry_max_attempts `. 
+ HostSelectionRetryMaxAttempts int64 `protobuf:"varint,6,opt,name=host_selection_retry_max_attempts,json=hostSelectionRetryMaxAttempts,proto3" json:"host_selection_retry_max_attempts,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. +func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18} +} + +func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy { + if x != nil { + return x.RetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.NumRetries + } + return nil +} + +func (x *RetryPolicy) GetRetryOn() string { + if x != nil { + return x.RetryOn + } + return "" +} + +func (x *RetryPolicy) GetRetryPriority() *RetryPolicy_RetryPriority { + if x != nil { + return x.RetryPriority + } + return nil +} + +func (x *RetryPolicy) GetRetryHostPredicate() []*RetryPolicy_RetryHostPredicate { + if x != nil { + return x.RetryHostPredicate + } + return nil +} + +func (x *RetryPolicy) GetHostSelectionRetryMaxAttempts() int64 { + if x != nil { + return x.HostSelectionRetryMaxAttempts + } + return 0 +} + +// The message specifies how to fetch data from remote and how to verify it. +type RemoteDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP URI to fetch the remote data. + HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"` + // SHA256 string for verifying data. + Sha256 string `protobuf:"bytes,2,opt,name=sha256,proto3" json:"sha256,omitempty"` + // Retry policy for fetching remote data. + RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` +} + +func (x *RemoteDataSource) Reset() { + *x = RemoteDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoteDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoteDataSource) ProtoMessage() {} + +func (x *RemoteDataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead. 
+func (*RemoteDataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19} +} + +func (x *RemoteDataSource) GetHttpUri() *HttpUri { + if x != nil { + return x.HttpUri + } + return nil +} + +func (x *RemoteDataSource) GetSha256() string { + if x != nil { + return x.Sha256 + } + return "" +} + +func (x *RemoteDataSource) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +// Async data source which support async data fetch. +type AsyncDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Specifier: + // + // *AsyncDataSource_Local + // *AsyncDataSource_Remote + Specifier isAsyncDataSource_Specifier `protobuf_oneof:"specifier"` +} + +func (x *AsyncDataSource) Reset() { + *x = AsyncDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsyncDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsyncDataSource) ProtoMessage() {} + +func (x *AsyncDataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead. +func (*AsyncDataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{20} +} + +func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier { + if m != nil { + return m.Specifier + } + return nil +} + +func (x *AsyncDataSource) GetLocal() *DataSource { + if x, ok := x.GetSpecifier().(*AsyncDataSource_Local); ok { + return x.Local + } + return nil +} + +func (x *AsyncDataSource) GetRemote() *RemoteDataSource { + if x, ok := x.GetSpecifier().(*AsyncDataSource_Remote); ok { + return x.Remote + } + return nil +} + +type isAsyncDataSource_Specifier interface { + isAsyncDataSource_Specifier() +} + +type AsyncDataSource_Local struct { + // Local async data source. + Local *DataSource `protobuf:"bytes,1,opt,name=local,proto3,oneof"` +} + +type AsyncDataSource_Remote struct { + // Remote async data source. + Remote *RemoteDataSource `protobuf:"bytes,2,opt,name=remote,proto3,oneof"` +} + +func (*AsyncDataSource_Local) isAsyncDataSource_Specifier() {} + +func (*AsyncDataSource_Remote) isAsyncDataSource_Specifier() {} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +type TransportSocket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. 
+ // + // Types that are assignable to ConfigType: + // + // *TransportSocket_TypedConfig + ConfigType isTransportSocket_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *TransportSocket) Reset() { + *x = TransportSocket{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransportSocket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransportSocket) ProtoMessage() {} + +func (x *TransportSocket) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead. +func (*TransportSocket) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{21} +} + +func (x *TransportSocket) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *TransportSocket) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isTransportSocket_ConfigType interface { + isTransportSocket_ConfigType() +} + +type TransportSocket_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +// +// .. note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. +type RuntimeFractionalPercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if the runtime value's for the numerator/denominator keys are not available. + DefaultValue *v3.FractionalPercent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key for a YAML representation of a FractionalPercent. 
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeFractionalPercent) Reset() { + *x = RuntimeFractionalPercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFractionalPercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFractionalPercent) ProtoMessage() {} + +func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead. +func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{22} +} + +func (x *RuntimeFractionalPercent) GetDefaultValue() *v3.FractionalPercent { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimeFractionalPercent) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +type ControlPlane struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` +} + +func (x *ControlPlane) Reset() { + *x = ControlPlane{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlPlane) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlPlane) ProtoMessage() {} + +func (x *ControlPlane) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead. +func (*ControlPlane) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{23} +} + +func (x *ControlPlane) GetIdentifier() string { + if x != nil { + return x.Identifier + } + return "" +} + +// See :ref:`RetryPriority `. 
+type RetryPolicy_RetryPriority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryPriority_TypedConfig + ConfigType isRetryPolicy_RetryPriority_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryPriority) Reset() { + *x = RetryPolicy_RetryPriority{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryPriority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryPriority) ProtoMessage() {} + +func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. +func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *RetryPolicy_RetryPriority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryPriority_ConfigType interface { + isRetryPolicy_RetryPriority_ConfigType() +} + +type RetryPolicy_RetryPriority_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} + +// See :ref:`RetryHostPredicate `. +type RetryPolicy_RetryHostPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryHostPredicate_TypedConfig + ConfigType isRetryPolicy_RetryHostPredicate_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryHostPredicate) Reset() { + *x = RetryPolicy_RetryHostPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryHostPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} + +func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *RetryPolicy_RetryHostPredicate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHostPredicate_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryHostPredicate_ConfigType interface { + isRetryPolicy_RetryHostPredicate_ConfigType() +} + +type RetryPolicy_RetryHostPredicate_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} + +var File_envoy_config_core_v3_base_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_base_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 
0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x74, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, + 0x62, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 0x62, 0x5a, 0x6f, 0x6e, 0x65, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x8c, 0x02, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x34, 0x0a, + 0x0f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 
0x69, 0x6f, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, + 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb2, + 0x06, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x60, 0x0a, 0x12, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x75, + 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x75, 0x73, 0x65, 0x72, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x18, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x15, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 
0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x12, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x69, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x79, 0x0a, 0x15, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x22, 0x77, 0x0a, 0x0e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x3b, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0c, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 
0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x22, 0xb6, + 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x3f, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x0e, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x5b, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, + 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, + 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, + 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, + 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 
0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x22, 0x73, 0x0a, 0x10, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, + 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, + 0x65, 0x6e, 0x64, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xfa, 0x42, 0x06, + 0x72, 0x04, 0x28, 0x80, 0x80, 0x01, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x41, + 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, + 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x21, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x3a, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x1d, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x00, 0x18, 0x80, 0x80, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xd9, 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, + 0x0d, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, + 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x7d, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, + 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, + 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, + 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, + 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, + 0x03, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, + 0x09, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xc9, 0x02, 0x0a, + 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 
0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, + 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x53, 0x0a, 0x11, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, + 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xee, 0x05, 0x0a, 0x0b, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x56, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x14, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 
0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x1a, 0x76, + 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x7b, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, + 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, + 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 
0x1e, 0x24, + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, + 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, + 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, + 0x65, 0x79, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, + 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, + 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, + 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, + 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, + 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x02, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_base_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_base_proto_rawDescData = file_envoy_config_core_v3_base_proto_rawDesc +) + +func file_envoy_config_core_v3_base_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_base_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_base_proto_rawDescData) + }) + return 
file_envoy_config_core_v3_base_proto_rawDescData +} + +var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{ + (RoutingPriority)(0), // 0: envoy.config.core.v3.RoutingPriority + (RequestMethod)(0), // 1: envoy.config.core.v3.RequestMethod + (TrafficDirection)(0), // 2: envoy.config.core.v3.TrafficDirection + (KeyValueAppend_KeyValueAppendAction)(0), // 3: envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + (HeaderValueOption_HeaderAppendAction)(0), // 4: envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + (*Locality)(nil), // 5: envoy.config.core.v3.Locality + (*BuildVersion)(nil), // 6: envoy.config.core.v3.BuildVersion + (*Extension)(nil), // 7: envoy.config.core.v3.Extension + (*Node)(nil), // 8: envoy.config.core.v3.Node + (*Metadata)(nil), // 9: envoy.config.core.v3.Metadata + (*RuntimeUInt32)(nil), // 10: envoy.config.core.v3.RuntimeUInt32 + (*RuntimePercent)(nil), // 11: envoy.config.core.v3.RuntimePercent + (*RuntimeDouble)(nil), // 12: envoy.config.core.v3.RuntimeDouble + (*RuntimeFeatureFlag)(nil), // 13: envoy.config.core.v3.RuntimeFeatureFlag + (*KeyValue)(nil), // 14: envoy.config.core.v3.KeyValue + (*KeyValueAppend)(nil), // 15: envoy.config.core.v3.KeyValueAppend + (*KeyValueMutation)(nil), // 16: envoy.config.core.v3.KeyValueMutation + (*QueryParameter)(nil), // 17: envoy.config.core.v3.QueryParameter + (*HeaderValue)(nil), // 18: envoy.config.core.v3.HeaderValue + (*HeaderValueOption)(nil), // 19: envoy.config.core.v3.HeaderValueOption + (*HeaderMap)(nil), // 20: envoy.config.core.v3.HeaderMap + (*WatchedDirectory)(nil), // 21: envoy.config.core.v3.WatchedDirectory + (*DataSource)(nil), // 22: envoy.config.core.v3.DataSource + (*RetryPolicy)(nil), // 23: envoy.config.core.v3.RetryPolicy + (*RemoteDataSource)(nil), // 24: envoy.config.core.v3.RemoteDataSource + (*AsyncDataSource)(nil), // 25: envoy.config.core.v3.AsyncDataSource + (*TransportSocket)(nil), // 26: envoy.config.core.v3.TransportSocket + (*RuntimeFractionalPercent)(nil), // 27: envoy.config.core.v3.RuntimeFractionalPercent + (*ControlPlane)(nil), // 28: envoy.config.core.v3.ControlPlane + nil, // 29: envoy.config.core.v3.Node.DynamicParametersEntry + nil, // 30: envoy.config.core.v3.Metadata.FilterMetadataEntry + nil, // 31: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + (*RetryPolicy_RetryPriority)(nil), // 32: envoy.config.core.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 33: envoy.config.core.v3.RetryPolicy.RetryHostPredicate + (*v3.SemanticVersion)(nil), // 34: envoy.type.v3.SemanticVersion + (*structpb.Struct)(nil), // 35: google.protobuf.Struct + (*Address)(nil), // 36: envoy.config.core.v3.Address + (*v3.Percent)(nil), // 37: envoy.type.v3.Percent + (*wrapperspb.BoolValue)(nil), // 38: google.protobuf.BoolValue + (*BackoffStrategy)(nil), // 39: envoy.config.core.v3.BackoffStrategy + (*wrapperspb.UInt32Value)(nil), // 40: google.protobuf.UInt32Value + (*HttpUri)(nil), // 41: envoy.config.core.v3.HttpUri + (*anypb.Any)(nil), // 42: google.protobuf.Any + (*v3.FractionalPercent)(nil), // 43: envoy.type.v3.FractionalPercent + (*v31.ContextParams)(nil), // 44: xds.core.v3.ContextParams +} +var file_envoy_config_core_v3_base_proto_depIdxs = []int32{ + 34, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion + 35, // 1: 
envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct + 6, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion + 35, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct + 29, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry + 5, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality + 6, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion + 7, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension + 36, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address + 30, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry + 31, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + 37, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent + 38, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue + 14, // 13: envoy.config.core.v3.KeyValueAppend.entry:type_name -> envoy.config.core.v3.KeyValue + 3, // 14: envoy.config.core.v3.KeyValueAppend.action:type_name -> envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + 15, // 15: envoy.config.core.v3.KeyValueMutation.append:type_name -> envoy.config.core.v3.KeyValueAppend + 18, // 16: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue + 38, // 17: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue + 4, // 18: envoy.config.core.v3.HeaderValueOption.append_action:type_name -> envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + 18, // 19: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue + 21, // 20: envoy.config.core.v3.DataSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 39, // 21: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy + 40, // 22: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 32, // 23: envoy.config.core.v3.RetryPolicy.retry_priority:type_name -> envoy.config.core.v3.RetryPolicy.RetryPriority + 33, // 24: envoy.config.core.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.core.v3.RetryPolicy.RetryHostPredicate + 41, // 25: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri + 23, // 26: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 22, // 27: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource + 24, // 28: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource + 42, // 29: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any + 43, // 30: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent + 44, // 31: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams + 35, // 32: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct + 42, // 33: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any + 42, // 34: 
envoy.config.core.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 42, // 35: envoy.config.core.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_base_proto_init() } +func file_envoy_config_core_v3_base_proto_init() { + if File_envoy_config_core_v3_base_proto != nil { + return + } + file_envoy_config_core_v3_address_proto_init() + file_envoy_config_core_v3_backoff_proto_init() + file_envoy_config_core_v3_http_uri_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locality); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeUInt32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimePercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeDouble); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFeatureFlag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueAppend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueMutation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValueOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchedDirectory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsyncDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransportSocket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFractionalPercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_base_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlPlane); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryPriority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryHostPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_base_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Node_UserAgentVersion)(nil), + (*Node_UserAgentBuildVersion)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{ + (*DataSource_Filename)(nil), + (*DataSource_InlineBytes)(nil), + (*DataSource_InlineString)(nil), + (*DataSource_EnvironmentVariable)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*AsyncDataSource_Local)(nil), + (*AsyncDataSource_Remote)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[21].OneofWrappers = []interface{}{ + (*TransportSocket_TypedConfig)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryPriority_TypedConfig)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[28].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_base_proto_rawDesc, + NumEnums: 5, + NumMessages: 29, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_base_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_base_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_base_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_base_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_base_proto = out.File + file_envoy_config_core_v3_base_proto_rawDesc = nil + file_envoy_config_core_v3_base_proto_goTypes = nil + file_envoy_config_core_v3_base_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go new file mode 100644 index 000000000..09d836dac --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go @@ -0,0 +1,4164 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Locality with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Locality) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Locality with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LocalityMultiError, or nil +// if none found. +func (m *Locality) ValidateAll() error { + return m.validate(true) +} + +func (m *Locality) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Region + + // no validation rules for Zone + + // no validation rules for SubZone + + if len(errors) > 0 { + return LocalityMultiError(errors) + } + + return nil +} + +// LocalityMultiError is an error wrapping multiple validation errors returned +// by Locality.ValidateAll() if the designated constraints aren't met. +type LocalityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityMultiError) AllErrors() []error { return m } + +// LocalityValidationError is the validation error returned by +// Locality.Validate if the designated constraints aren't met. +type LocalityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityValidationError) ErrorName() string { return "LocalityValidationError" } + +// Error satisfies the builtin error interface +func (e LocalityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocality.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityValidationError{} + +// Validate checks the field values on BuildVersion with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *BuildVersion) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BuildVersion with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BuildVersionMultiError, or +// nil if none found. +func (m *BuildVersion) ValidateAll() error { + return m.validate(true) +} + +func (m *BuildVersion) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BuildVersionMultiError(errors) + } + + return nil +} + +// BuildVersionMultiError is an error wrapping multiple validation errors +// returned by BuildVersion.ValidateAll() if the designated constraints aren't met. +type BuildVersionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BuildVersionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BuildVersionMultiError) AllErrors() []error { return m } + +// BuildVersionValidationError is the validation error returned by +// BuildVersion.Validate if the designated constraints aren't met. +type BuildVersionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BuildVersionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BuildVersionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BuildVersionValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e BuildVersionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BuildVersionValidationError) ErrorName() string { return "BuildVersionValidationError" } + +// Error satisfies the builtin error interface +func (e BuildVersionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBuildVersion.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BuildVersionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BuildVersionValidationError{} + +// Validate checks the field values on Extension with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Extension) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Extension with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ExtensionMultiError, or nil +// if none found. +func (m *Extension) ValidateAll() error { + return m.validate(true) +} + +func (m *Extension) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for Category + + // no validation rules for TypeDescriptor + + if all { + switch v := interface{}(m.GetVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Disabled + + if len(errors) > 0 { + return ExtensionMultiError(errors) + } + + return nil +} + +// ExtensionMultiError is an error wrapping multiple validation errors returned +// by Extension.ValidateAll() if the designated constraints aren't met. +type ExtensionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionMultiError) AllErrors() []error { return m } + +// ExtensionValidationError is the validation error returned by +// Extension.Validate if the designated constraints aren't met. +type ExtensionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtensionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtensionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ExtensionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtensionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ExtensionValidationError) ErrorName() string { return "ExtensionValidationError" } + +// Error satisfies the builtin error interface +func (e ExtensionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtension.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtensionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtensionValidationError{} + +// Validate checks the field values on Node with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *Node) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Node with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in NodeMultiError, or nil if none found. +func (m *Node) ValidateAll() error { + return m.validate(true) +} + +func (m *Node) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for Cluster + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetDynamicParameters())) + i := 0 + for key := range m.GetDynamicParameters() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetDynamicParameters()[key] + _ = val + + // no validation rules for DynamicParameters[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + 
} + } + } + + } + } + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UserAgentName + + for idx, item := range m.GetExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetListeningAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + switch v := m.UserAgentVersionType.(type) { + case *Node_UserAgentVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for UserAgentVersion + case *Node_UserAgentBuildVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetUserAgentBuildVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil 
{ + errors = append(errors, NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUserAgentBuildVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return NodeMultiError(errors) + } + + return nil +} + +// NodeMultiError is an error wrapping multiple validation errors returned by +// Node.ValidateAll() if the designated constraints aren't met. +type NodeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NodeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NodeMultiError) AllErrors() []error { return m } + +// NodeValidationError is the validation error returned by Node.Validate if the +// designated constraints aren't met. +type NodeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NodeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NodeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NodeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NodeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NodeValidationError) ErrorName() string { return "NodeValidationError" } + +// Error satisfies the builtin error interface +func (e NodeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNode.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NodeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NodeValidationError{} + +// Validate checks the field values on Metadata with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Metadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataMultiError, or nil +// if none found. 
+func (m *Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + { + sorted_keys := make([]string, len(m.GetFilterMetadata())) + i := 0 + for key := range m.GetFilterMetadata() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilterMetadata()[key] + _ = val + + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + { + sorted_keys := make([]string, len(m.GetTypedFilterMetadata())) + i := 0 + for key := range m.GetTypedFilterMetadata() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedFilterMetadata()[key] + _ = val + + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return MetadataMultiError(errors) + } + + return nil +} + +// MetadataMultiError is an error wrapping multiple validation errors returned +// by Metadata.ValidateAll() if the designated constraints aren't met. +type MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMultiError) AllErrors() []error { return m } + +// MetadataValidationError is the validation error returned by +// Metadata.Validate if the designated constraints aren't met. +type MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataValidationError{} + +// Validate checks the field values on RuntimeUInt32 with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeUInt32) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeUInt32 with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeUInt32MultiError, or +// nil if none found. +func (m *RuntimeUInt32) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeUInt32) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DefaultValue + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeUInt32ValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeUInt32MultiError(errors) + } + + return nil +} + +// RuntimeUInt32MultiError is an error wrapping multiple validation errors +// returned by RuntimeUInt32.ValidateAll() if the designated constraints +// aren't met. +type RuntimeUInt32MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeUInt32MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeUInt32MultiError) AllErrors() []error { return m } + +// RuntimeUInt32ValidationError is the validation error returned by +// RuntimeUInt32.Validate if the designated constraints aren't met. 
+type RuntimeUInt32ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeUInt32ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeUInt32ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeUInt32ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeUInt32ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeUInt32ValidationError) ErrorName() string { return "RuntimeUInt32ValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeUInt32ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeUInt32.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeUInt32ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeUInt32ValidationError{} + +// Validate checks the field values on RuntimePercent with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimePercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimePercent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimePercentMultiError, +// or nil if none found. +func (m *RuntimePercent) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimePercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimePercentValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimePercentMultiError(errors) + } + + return nil +} + +// RuntimePercentMultiError is an error wrapping multiple validation errors +// returned by RuntimePercent.ValidateAll() if the designated constraints +// aren't met. +type RuntimePercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RuntimePercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimePercentMultiError) AllErrors() []error { return m } + +// RuntimePercentValidationError is the validation error returned by +// RuntimePercent.Validate if the designated constraints aren't met. +type RuntimePercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimePercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimePercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimePercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimePercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimePercentValidationError) ErrorName() string { return "RuntimePercentValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimePercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimePercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimePercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimePercentValidationError{} + +// Validate checks the field values on RuntimeDouble with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeDouble) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeDouble with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeDoubleMultiError, or +// nil if none found. +func (m *RuntimeDouble) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeDouble) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DefaultValue + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeDoubleValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeDoubleMultiError(errors) + } + + return nil +} + +// RuntimeDoubleMultiError is an error wrapping multiple validation errors +// returned by RuntimeDouble.ValidateAll() if the designated constraints +// aren't met. +type RuntimeDoubleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeDoubleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeDoubleMultiError) AllErrors() []error { return m } + +// RuntimeDoubleValidationError is the validation error returned by +// RuntimeDouble.Validate if the designated constraints aren't met. 
+type RuntimeDoubleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeDoubleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeDoubleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeDoubleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeDoubleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeDoubleValidationError) ErrorName() string { return "RuntimeDoubleValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeDoubleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeDouble.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeDoubleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeDoubleValidationError{} + +// Validate checks the field values on RuntimeFeatureFlag with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeFeatureFlag) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFeatureFlag with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeFeatureFlagMultiError, or nil if none found. +func (m *RuntimeFeatureFlag) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFeatureFlag) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValue() == nil { + err := RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeFeatureFlagValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeFeatureFlagMultiError(errors) + } + + return nil +} + +// RuntimeFeatureFlagMultiError is an error wrapping multiple validation errors +// returned by RuntimeFeatureFlag.ValidateAll() if the designated constraints +// aren't met. 
+type RuntimeFeatureFlagMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFeatureFlagMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFeatureFlagMultiError) AllErrors() []error { return m } + +// RuntimeFeatureFlagValidationError is the validation error returned by +// RuntimeFeatureFlag.Validate if the designated constraints aren't met. +type RuntimeFeatureFlagValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFeatureFlagValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFeatureFlagValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFeatureFlagValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFeatureFlagValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFeatureFlagValidationError) ErrorName() string { + return "RuntimeFeatureFlagValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeFeatureFlagValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFeatureFlag.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFeatureFlagValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFeatureFlagValidationError{} + +// Validate checks the field values on KeyValue with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueMultiError, or nil +// if none found. +func (m *KeyValue) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := KeyValueValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetKey()) > 16384 { + err := KeyValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Value + + if len(errors) > 0 { + return KeyValueMultiError(errors) + } + + return nil +} + +// KeyValueMultiError is an error wrapping multiple validation errors returned +// by KeyValue.ValidateAll() if the designated constraints aren't met. +type KeyValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m KeyValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueMultiError) AllErrors() []error { return m } + +// KeyValueValidationError is the validation error returned by +// KeyValue.Validate if the designated constraints aren't met. +type KeyValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueValidationError{} + +// Validate checks the field values on KeyValueAppend with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValueAppend) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValueAppend with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueAppendMultiError, +// or nil if none found. 
+func (m *KeyValueAppend) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValueAppend) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetEntry() == nil { + err := KeyValueAppendValidationError{ + field: "Entry", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntry()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := KeyValueAppend_KeyValueAppendAction_name[int32(m.GetAction())]; !ok { + err := KeyValueAppendValidationError{ + field: "Action", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return KeyValueAppendMultiError(errors) + } + + return nil +} + +// KeyValueAppendMultiError is an error wrapping multiple validation errors +// returned by KeyValueAppend.ValidateAll() if the designated constraints +// aren't met. +type KeyValueAppendMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValueAppendMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueAppendMultiError) AllErrors() []error { return m } + +// KeyValueAppendValidationError is the validation error returned by +// KeyValueAppend.Validate if the designated constraints aren't met. +type KeyValueAppendValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueAppendValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValueAppendValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueAppendValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueAppendValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e KeyValueAppendValidationError) ErrorName() string { return "KeyValueAppendValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueAppendValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValueAppend.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueAppendValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueAppendValidationError{} + +// Validate checks the field values on KeyValueMutation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *KeyValueMutation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValueMutation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KeyValueMutationMultiError, or nil if none found. +func (m *KeyValueMutation) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValueMutation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAppend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRemove()) > 16384 { + err := KeyValueMutationValidationError{ + field: "Remove", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return KeyValueMutationMultiError(errors) + } + + return nil +} + +// KeyValueMutationMultiError is an error wrapping multiple validation errors +// returned by KeyValueMutation.ValidateAll() if the designated constraints +// aren't met. +type KeyValueMutationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValueMutationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueMutationMultiError) AllErrors() []error { return m } + +// KeyValueMutationValidationError is the validation error returned by +// KeyValueMutation.Validate if the designated constraints aren't met. +type KeyValueMutationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueMutationValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e KeyValueMutationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueMutationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueMutationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValueMutationValidationError) ErrorName() string { return "KeyValueMutationValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueMutationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValueMutation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueMutationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueMutationValidationError{} + +// Validate checks the field values on QueryParameter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *QueryParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QueryParameter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in QueryParameterMultiError, +// or nil if none found. +func (m *QueryParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *QueryParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := QueryParameterValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Value + + if len(errors) > 0 { + return QueryParameterMultiError(errors) + } + + return nil +} + +// QueryParameterMultiError is an error wrapping multiple validation errors +// returned by QueryParameter.ValidateAll() if the designated constraints +// aren't met. +type QueryParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QueryParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QueryParameterMultiError) AllErrors() []error { return m } + +// QueryParameterValidationError is the validation error returned by +// QueryParameter.Validate if the designated constraints aren't met. +type QueryParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QueryParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QueryParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QueryParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QueryParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e QueryParameterValidationError) ErrorName() string { return "QueryParameterValidationError" } + +// Error satisfies the builtin error interface +func (e QueryParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQueryParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QueryParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QueryParameterValidationError{} + +// Validate checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderValueMultiError, or +// nil if none found. +func (m *HeaderValue) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetKey()) > 16384 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) { + err := HeaderValueValidationError{ + field: "Key", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetValue()) > 16384 { + err := HeaderValueValidationError{ + field: "Value", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) { + err := HeaderValueValidationError{ + field: "Value", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if l := len(m.GetRawValue()); l < 0 || l > 16384 { + err := HeaderValueValidationError{ + field: "RawValue", + reason: "value length must be between 0 and 16384 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HeaderValueMultiError(errors) + } + + return nil +} + +// HeaderValueMultiError is an error wrapping multiple validation errors +// returned by HeaderValue.ValidateAll() if the designated constraints aren't met. +type HeaderValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HeaderValueMultiError) AllErrors() []error { return m } + +// HeaderValueValidationError is the validation error returned by +// HeaderValue.Validate if the designated constraints aren't met. +type HeaderValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueValidationError{} + +var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HeaderValueOption with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HeaderValueOption) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValueOption with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HeaderValueOptionMultiError, or nil if none found. 
+func (m *HeaderValueOption) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValueOption) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHeader() == nil { + err := HeaderValueOptionValidationError{ + field: "Header", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAppend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := HeaderValueOption_HeaderAppendAction_name[int32(m.GetAppendAction())]; !ok { + err := HeaderValueOptionValidationError{ + field: "AppendAction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for KeepEmptyValue + + if len(errors) > 0 { + return HeaderValueOptionMultiError(errors) + } + + return nil +} + +// HeaderValueOptionMultiError is an error wrapping multiple validation errors +// returned by HeaderValueOption.ValidateAll() if the designated constraints +// aren't met. +type HeaderValueOptionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueOptionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderValueOptionMultiError) AllErrors() []error { return m } + +// HeaderValueOptionValidationError is the validation error returned by +// HeaderValueOption.Validate if the designated constraints aren't met. +type HeaderValueOptionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueOptionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueOptionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HeaderValueOptionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueOptionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderValueOptionValidationError) ErrorName() string { + return "HeaderValueOptionValidationError" +} + +// Error satisfies the builtin error interface +func (e HeaderValueOptionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValueOption.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueOptionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueOptionValidationError{} + +// Validate checks the field values on HeaderMap with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMap with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMapMultiError, or nil +// if none found. +func (m *HeaderMap) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HeaderMapMultiError(errors) + } + + return nil +} + +// HeaderMapMultiError is an error wrapping multiple validation errors returned +// by HeaderMap.ValidateAll() if the designated constraints aren't met. +type HeaderMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderMapMultiError) AllErrors() []error { return m } + +// HeaderMapValidationError is the validation error returned by +// HeaderMap.Validate if the designated constraints aren't met. +type HeaderMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HeaderMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderMapValidationError) ErrorName() string { return "HeaderMapValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderMapValidationError{} + +// Validate checks the field values on WatchedDirectory with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WatchedDirectory) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WatchedDirectory with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WatchedDirectoryMultiError, or nil if none found. +func (m *WatchedDirectory) ValidateAll() error { + return m.validate(true) +} + +func (m *WatchedDirectory) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := WatchedDirectoryValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return WatchedDirectoryMultiError(errors) + } + + return nil +} + +// WatchedDirectoryMultiError is an error wrapping multiple validation errors +// returned by WatchedDirectory.ValidateAll() if the designated constraints +// aren't met. +type WatchedDirectoryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchedDirectoryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchedDirectoryMultiError) AllErrors() []error { return m } + +// WatchedDirectoryValidationError is the validation error returned by +// WatchedDirectory.Validate if the designated constraints aren't met. +type WatchedDirectoryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchedDirectoryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchedDirectoryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchedDirectoryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchedDirectoryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WatchedDirectoryValidationError) ErrorName() string { return "WatchedDirectoryValidationError" } + +// Error satisfies the builtin error interface +func (e WatchedDirectoryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchedDirectory.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchedDirectoryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchedDirectoryValidationError{} + +// Validate checks the field values on DataSource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DataSource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DataSourceMultiError, or +// nil if none found. +func (m *DataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *DataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { + case *DataSource_Filename: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if utf8.RuneCountInString(m.GetFilename()) < 1 { + err := DataSourceValidationError{ + field: "Filename", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *DataSource_InlineBytes: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + // no validation rules for InlineBytes + case *DataSource_InlineString: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + // no validation rules for InlineString + case *DataSource_EnvironmentVariable: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofSpecifierPresent = true + + if utf8.RuneCountInString(m.GetEnvironmentVariable()) < 1 { + err := DataSourceValidationError{ + field: "EnvironmentVariable", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { + err := DataSourceValidationError{ + field: "Specifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DataSourceMultiError(errors) + } + + return nil +} + +// DataSourceMultiError is an error wrapping multiple validation errors +// returned by DataSource.ValidateAll() if the designated constraints aren't met. +type DataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DataSourceMultiError) AllErrors() []error { return m } + +// DataSourceValidationError is the validation error returned by +// DataSource.Validate if the designated constraints aren't met. +type DataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DataSourceValidationError) ErrorName() string { return "DataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e DataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DataSourceValidationError{} + +// Validate checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RetryPolicyMultiError, or +// nil if none found. 
+func (m *RetryPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetNumRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RetryOn + + if all { + switch v := interface{}(m.GetRetryPriority()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPriority()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetryHostPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + 
// no validation rules for HostSelectionRetryMaxAttempts + + if len(errors) > 0 { + return RetryPolicyMultiError(errors) + } + + return nil +} + +// RetryPolicyMultiError is an error wrapping multiple validation errors +// returned by RetryPolicy.ValidateAll() if the designated constraints aren't met. +type RetryPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicyMultiError) AllErrors() []error { return m } + +// RetryPolicyValidationError is the validation error returned by +// RetryPolicy.Validate if the designated constraints aren't met. +type RetryPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e RetryPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicyValidationError{} + +// Validate checks the field values on RemoteDataSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RemoteDataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RemoteDataSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RemoteDataSourceMultiError, or nil if none found. 
+func (m *RemoteDataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *RemoteDataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHttpUri() == nil { + err := RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHttpUri()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetSha256()) < 1 { + err := RemoteDataSourceValidationError{ + field: "Sha256", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RemoteDataSourceMultiError(errors) + } + + return nil +} + +// RemoteDataSourceMultiError is an error wrapping multiple validation errors +// returned by RemoteDataSource.ValidateAll() if the designated constraints +// aren't met. +type RemoteDataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RemoteDataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RemoteDataSourceMultiError) AllErrors() []error { return m } + +// RemoteDataSourceValidationError is the validation error returned by +// RemoteDataSource.Validate if the designated constraints aren't met. +type RemoteDataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RemoteDataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RemoteDataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RemoteDataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RemoteDataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RemoteDataSourceValidationError) ErrorName() string { return "RemoteDataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e RemoteDataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRemoteDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RemoteDataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RemoteDataSourceValidationError{} + +// Validate checks the field values on AsyncDataSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AsyncDataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AsyncDataSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AsyncDataSourceMultiError, or nil if none found. +func (m *AsyncDataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *AsyncDataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { + case *AsyncDataSource_Local: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocal()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocal()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AsyncDataSource_Remote: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRemote()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemote()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AsyncDataSourceMultiError(errors) + } + + return nil +} + +// AsyncDataSourceMultiError is an error wrapping multiple validation errors +// returned by AsyncDataSource.ValidateAll() if the designated constraints +// aren't met. +type AsyncDataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AsyncDataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AsyncDataSourceMultiError) AllErrors() []error { return m } + +// AsyncDataSourceValidationError is the validation error returned by +// AsyncDataSource.Validate if the designated constraints aren't met. +type AsyncDataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AsyncDataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AsyncDataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AsyncDataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AsyncDataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AsyncDataSourceValidationError) ErrorName() string { return "AsyncDataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e AsyncDataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAsyncDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AsyncDataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AsyncDataSourceValidationError{} + +// Validate checks the field values on TransportSocket with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TransportSocket) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TransportSocket with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TransportSocketMultiError, or nil if none found. 
+func (m *TransportSocket) ValidateAll() error { + return m.validate(true) +} + +func (m *TransportSocket) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TransportSocketValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *TransportSocket_TypedConfig: + if v == nil { + err := TransportSocketValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TransportSocketMultiError(errors) + } + + return nil +} + +// TransportSocketMultiError is an error wrapping multiple validation errors +// returned by TransportSocket.ValidateAll() if the designated constraints +// aren't met. +type TransportSocketMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TransportSocketMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TransportSocketMultiError) AllErrors() []error { return m } + +// TransportSocketValidationError is the validation error returned by +// TransportSocket.Validate if the designated constraints aren't met. +type TransportSocketValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TransportSocketValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TransportSocketValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TransportSocketValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TransportSocketValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TransportSocketValidationError) ErrorName() string { return "TransportSocketValidationError" } + +// Error satisfies the builtin error interface +func (e TransportSocketValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTransportSocket.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TransportSocketValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TransportSocketValidationError{} + +// Validate checks the field values on RuntimeFractionalPercent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeFractionalPercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFractionalPercent with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeFractionalPercentMultiError, or nil if none found. +func (m *RuntimeFractionalPercent) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFractionalPercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValue() == nil { + err := RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RuntimeKey + + if len(errors) > 0 { + return RuntimeFractionalPercentMultiError(errors) + } + + return nil +} + +// RuntimeFractionalPercentMultiError is an error wrapping multiple validation +// errors returned by RuntimeFractionalPercent.ValidateAll() if the designated +// constraints aren't met. +type RuntimeFractionalPercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFractionalPercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFractionalPercentMultiError) AllErrors() []error { return m } + +// RuntimeFractionalPercentValidationError is the validation error returned by +// RuntimeFractionalPercent.Validate if the designated constraints aren't met. 
+type RuntimeFractionalPercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFractionalPercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFractionalPercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFractionalPercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFractionalPercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFractionalPercentValidationError) ErrorName() string { + return "RuntimeFractionalPercentValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeFractionalPercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFractionalPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFractionalPercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFractionalPercentValidationError{} + +// Validate checks the field values on ControlPlane with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ControlPlane) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ControlPlane with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ControlPlaneMultiError, or +// nil if none found. +func (m *ControlPlane) ValidateAll() error { + return m.validate(true) +} + +func (m *ControlPlane) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Identifier + + if len(errors) > 0 { + return ControlPlaneMultiError(errors) + } + + return nil +} + +// ControlPlaneMultiError is an error wrapping multiple validation errors +// returned by ControlPlane.ValidateAll() if the designated constraints aren't met. +type ControlPlaneMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ControlPlaneMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ControlPlaneMultiError) AllErrors() []error { return m } + +// ControlPlaneValidationError is the validation error returned by +// ControlPlane.Validate if the designated constraints aren't met. +type ControlPlaneValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ControlPlaneValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ControlPlaneValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ControlPlaneValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ControlPlaneValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ControlPlaneValidationError) ErrorName() string { return "ControlPlaneValidationError" } + +// Error satisfies the builtin error interface +func (e ControlPlaneValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sControlPlane.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ControlPlaneValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ControlPlaneValidationError{} + +// Validate checks the field values on RetryPolicy_RetryPriority with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryPriority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryPriority with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryPriorityMultiError, or nil if none found. +func (m *RetryPolicy_RetryPriority) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryPriority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryPriorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryPriorityMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryPriorityMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryPriority.ValidateAll() if the +// designated constraints aren't met. +type RetryPolicy_RetryPriorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryPriorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RetryPolicy_RetryPriorityMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryPriorityValidationError is the validation error returned by +// RetryPolicy_RetryPriority.Validate if the designated constraints aren't met. +type RetryPolicy_RetryPriorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryPriorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryPriorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryPriorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryPriorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryPriorityValidationError) ErrorName() string { + return "RetryPolicy_RetryPriorityValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryPriorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryPriority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryPriorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryPriorityValidationError{} + +// Validate checks the field values on RetryPolicy_RetryHostPredicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryHostPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryHostPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryHostPredicateMultiError, or nil if none found. 
+func (m *RetryPolicy_RetryHostPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryHostPredicateMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryHostPredicateMultiError is an error wrapping multiple +// validation errors returned by RetryPolicy_RetryHostPredicate.ValidateAll() +// if the designated constraints aren't met. +type RetryPolicy_RetryHostPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryHostPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryHostPredicateMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryHostPredicateValidationError is the validation error +// returned by RetryPolicy_RetryHostPredicate.Validate if the designated +// constraints aren't met. +type RetryPolicy_RetryHostPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryHostPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryHostPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryHostPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryHostPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicy_RetryHostPredicateValidationError) ErrorName() string { + return "RetryPolicy_RetryHostPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryHostPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryHostPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryHostPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryHostPredicateValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go new file mode 100644 index 000000000..fa4823023 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go @@ -0,0 +1,2497 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Locality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Locality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Locality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SubZone) > 0 { + i -= len(m.SubZone) + copy(dAtA[i:], m.SubZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubZone))) + i-- + dAtA[i] = 0x1a + } + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x12 + } + if len(m.Region) > 0 { + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != nil { + if vtmsg, ok := interface{}(m.Version).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Version) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Extension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + 
copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Version != nil { + size, err := m.Version.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TypeDescriptor) > 0 { + i -= len(m.TypeDescriptor) + copy(dAtA[i:], m.TypeDescriptor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeDescriptor))) + i-- + dAtA[i] = 0x1a + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ListeningAddresses) > 0 { + for iNdEx := len(m.ListeningAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListeningAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ClientFeatures) > 0 { + for iNdEx := len(m.ClientFeatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClientFeatures[iNdEx]) + copy(dAtA[i:], m.ClientFeatures[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientFeatures[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Extensions) > 0 { + for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Extensions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentBuildVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.UserAgentName) > 0 { + i -= len(m.UserAgentName) + copy(dAtA[i:], m.UserAgentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentName))) + i-- + dAtA[i] = 0x32 + } + if m.Locality != nil { + size, err := m.Locality.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node_UserAgentVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.UserAgentVersion) + copy(dAtA[i:], m.UserAgentVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentVersion))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *Node_UserAgentBuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentBuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UserAgentBuildVersion != nil { + size, err := m.UserAgentBuildVersion.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypedFilterMetadata) > 0 { + for k := range m.TypedFilterMetadata { + v := m.TypedFilterMetadata[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.FilterMetadata) > 0 { + 
for k := range m.FilterMetadata { + v := m.FilterMetadata[k] + baseI := i + size, err := (*structpb.Struct)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeUInt32) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeUInt32) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeUInt32) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x1a + } + if m.DefaultValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DefaultValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *RuntimePercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimePercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimePercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeDouble) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeDouble) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeDouble) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DefaultValue)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFeatureFlag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFeatureFlag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFeatureFlag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + size, err := (*wrapperspb.BoolValue)(m.DefaultValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueAppend) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueAppend) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueAppend) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if m.Entry != nil { + size, err := 
m.Entry.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueMutation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueMutation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueMutation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Remove) > 0 { + i -= len(m.Remove) + copy(dAtA[i:], m.Remove) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Remove))) + i-- + dAtA[i] = 0x12 + } + if m.Append != nil { + size, err := m.Append.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RawValue) > 0 { + i -= len(m.RawValue) + copy(dAtA[i:], m.RawValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RawValue))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *HeaderValueOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValueOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValueOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepEmptyValue { + i-- + if m.KeepEmptyValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.AppendAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AppendAction)) + i-- + dAtA[i] = 0x18 + } + if m.Append != nil { + size, err := (*wrapperspb.BoolValue)(m.Append).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WatchedDirectory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchedDirectory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WatchedDirectory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.Specifier.(*DataSource_EnvironmentVariable); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineString); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineBytes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_Filename); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DataSource_Filename) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_Filename) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *DataSource_InlineBytes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineBytes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineBytes) + copy(dAtA[i:], m.InlineBytes) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineBytes))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DataSource_InlineString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineString) + copy(dAtA[i:], m.InlineString) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineString))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *DataSource_EnvironmentVariable) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_EnvironmentVariable) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EnvironmentVariable) + copy(dAtA[i:], m.EnvironmentVariable) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EnvironmentVariable))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoteDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RemoteDataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Sha256) > 0 { + i -= len(m.Sha256) + copy(dAtA[i:], m.Sha256) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sha256))) + i-- + dAtA[i] = 0x12 + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AsyncDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Specifier.(*AsyncDataSource_Remote); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*AsyncDataSource_Local); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource_Local) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Local) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Local != nil { + size, err := m.Local.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AsyncDataSource_Remote) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Remote) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Remote != nil { + size, err := m.Remote.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *TransportSocket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransportSocket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*TransportSocket_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransportSocket_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeFractionalPercent) MarshalVTStrict() (dAtA []byte, err 
error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ControlPlane) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControlPlane) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ControlPlane) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Locality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Region) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Zone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != nil { + if size, ok := interface{}(m.Version).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Version) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeDescriptor) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Version != nil { + l = m.Version.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Disabled { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + l = m.Locality.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.UserAgentVersionType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ClientFeatures) > 0 { + for _, s := range m.ClientFeatures { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ListeningAddresses) > 0 { + for _, e := range m.ListeningAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node_UserAgentVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserAgentVersion) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Node_UserAgentBuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserAgentBuildVersion != nil { + l = m.UserAgentBuildVersion.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FilterMetadata) > 0 { + for k, v := range m.FilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*structpb.Struct)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.TypedFilterMetadata) > 0 { + for k, v := range m.TypedFilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeUInt32) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DefaultValue)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimePercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeDouble) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 9 + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFeatureFlag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + l = (*wrapperspb.BoolValue)(m.DefaultValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueAppend) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entry != nil { + l = m.Entry.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueMutation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Append != nil { + l = m.Append.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Remove) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RawValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValueOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Append != nil { + l = (*wrapperspb.BoolValue)(m.Append).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AppendAction)) + } + if m.KeepEmptyValue { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = 
e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WatchedDirectory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource_Filename) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filename) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineBytes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineBytes) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineString) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_EnvironmentVariable) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EnvironmentVariable) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *RemoteDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sha256) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource_Local) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Local != nil { + l = m.Local.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AsyncDataSource_Remote) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Remote != nil { + l = m.Remote.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TransportSocket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TransportSocket_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeFractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ControlPlane) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go new file mode 100644 index 000000000..295398b9f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go @@ -0,0 +1,1194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// xDS API and non-xDS services version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). +type ApiVersion int32 + +const ( + // When not specified, we assume v3; it is the only supported version. + ApiVersion_AUTO ApiVersion = 0 + // Use xDS v2 API. This is no longer supported. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + ApiVersion_V2 ApiVersion = 1 + // Use xDS v3 API. + ApiVersion_V3 ApiVersion = 2 +) + +// Enum value maps for ApiVersion. +var ( + ApiVersion_name = map[int32]string{ + 0: "AUTO", + 1: "V2", + 2: "V3", + } + ApiVersion_value = map[string]int32{ + "AUTO": 0, + "V2": 1, + "V3": 2, + } +) + +func (x ApiVersion) Enum() *ApiVersion { + p := new(ApiVersion) + *p = x + return p +} + +func (x ApiVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ApiVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_config_source_proto_enumTypes[0].Descriptor() +} + +func (ApiVersion) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_config_source_proto_enumTypes[0] +} + +func (x ApiVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ApiVersion.Descriptor instead. +func (ApiVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0} +} + +// APIs may be fetched via either REST or gRPC. +type ApiConfigSource_ApiType int32 + +const ( + // Ideally this would be 'reserved 0' but one can't reserve the default + // value. Instead we throw an exception if this is ever used. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE ApiConfigSource_ApiType = 0 + // REST-JSON v2 API. The `canonical JSON encoding + // `_ for + // the v2 protos is used. + ApiConfigSource_REST ApiConfigSource_ApiType = 1 + // SotW gRPC service. + ApiConfigSource_GRPC ApiConfigSource_ApiType = 2 + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + ApiConfigSource_DELTA_GRPC ApiConfigSource_ApiType = 3 + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. 
+ // [#not-implemented-hide:] + ApiConfigSource_AGGREGATED_GRPC ApiConfigSource_ApiType = 5 + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + ApiConfigSource_AGGREGATED_DELTA_GRPC ApiConfigSource_ApiType = 6 +) + +// Enum value maps for ApiConfigSource_ApiType. +var ( + ApiConfigSource_ApiType_name = map[int32]string{ + 0: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE", + 1: "REST", + 2: "GRPC", + 3: "DELTA_GRPC", + 5: "AGGREGATED_GRPC", + 6: "AGGREGATED_DELTA_GRPC", + } + ApiConfigSource_ApiType_value = map[string]int32{ + "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE": 0, + "REST": 1, + "GRPC": 2, + "DELTA_GRPC": 3, + "AGGREGATED_GRPC": 5, + "AGGREGATED_DELTA_GRPC": 6, + } +) + +func (x ApiConfigSource_ApiType) Enum() *ApiConfigSource_ApiType { + p := new(ApiConfigSource_ApiType) + *p = x + return p +} + +func (x ApiConfigSource_ApiType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ApiConfigSource_ApiType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_config_source_proto_enumTypes[1].Descriptor() +} + +func (ApiConfigSource_ApiType) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_config_source_proto_enumTypes[1] +} + +func (x ApiConfigSource_ApiType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ApiConfigSource_ApiType.Descriptor instead. +func (ApiConfigSource_ApiType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0, 0} +} + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +// [#next-free-field: 10] +type ApiConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API type (gRPC, REST, delta gRPC) + ApiType ApiConfigSource_ApiType `protobuf:"varint,1,opt,name=api_type,json=apiType,proto3,enum=envoy.config.core.v3.ApiConfigSource_ApiType" json:"api_type,omitempty"` + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + TransportApiVersion ApiVersion `protobuf:"varint,8,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` + // Cluster names should be used only with REST. If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + ClusterNames []string `protobuf:"bytes,2,rep,name=cluster_names,json=clusterNames,proto3" json:"cluster_names,omitempty"` + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"` + // For REST APIs, the delay between successive polls. + RefreshDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"` + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. 
+ RequestTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"` + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + SetNodeOnFirstMessageOnly bool `protobuf:"varint,7,opt,name=set_node_on_first_message_only,json=setNodeOnFirstMessageOnly,proto3" json:"set_node_on_first_message_only,omitempty"` + // A list of config validators that will be executed when a new update is + // received from the ApiConfigSource. Note that each validator handles a + // specific xDS service type, and only the validators corresponding to the + // type url (in “:ref: DiscoveryResponse“ or “:ref: DeltaDiscoveryResponse“) + // will be invoked. + // If the validator returns false or throws an exception, the config will be rejected by + // the client, and a NACK will be sent. + // [#extension-category: envoy.config.validators] + ConfigValidators []*TypedExtensionConfig `protobuf:"bytes,9,rep,name=config_validators,json=configValidators,proto3" json:"config_validators,omitempty"` +} + +func (x *ApiConfigSource) Reset() { + *x = ApiConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiConfigSource) ProtoMessage() {} + +func (x *ApiConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiConfigSource.ProtoReflect.Descriptor instead. 
+func (*ApiConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0} +} + +func (x *ApiConfigSource) GetApiType() ApiConfigSource_ApiType { + if x != nil { + return x.ApiType + } + return ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE +} + +func (x *ApiConfigSource) GetTransportApiVersion() ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return ApiVersion_AUTO +} + +func (x *ApiConfigSource) GetClusterNames() []string { + if x != nil { + return x.ClusterNames + } + return nil +} + +func (x *ApiConfigSource) GetGrpcServices() []*GrpcService { + if x != nil { + return x.GrpcServices + } + return nil +} + +func (x *ApiConfigSource) GetRefreshDelay() *durationpb.Duration { + if x != nil { + return x.RefreshDelay + } + return nil +} + +func (x *ApiConfigSource) GetRequestTimeout() *durationpb.Duration { + if x != nil { + return x.RequestTimeout + } + return nil +} + +func (x *ApiConfigSource) GetRateLimitSettings() *RateLimitSettings { + if x != nil { + return x.RateLimitSettings + } + return nil +} + +func (x *ApiConfigSource) GetSetNodeOnFirstMessageOnly() bool { + if x != nil { + return x.SetNodeOnFirstMessageOnly + } + return false +} + +func (x *ApiConfigSource) GetConfigValidators() []*TypedExtensionConfig { + if x != nil { + return x.ConfigValidators + } + return nil +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +type AggregatedConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AggregatedConfigSource) Reset() { + *x = AggregatedConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AggregatedConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AggregatedConfigSource) ProtoMessage() {} + +func (x *AggregatedConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AggregatedConfigSource.ProtoReflect.Descriptor instead. +func (*AggregatedConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{1} +} + +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +type SelfConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. 
+ TransportApiVersion ApiVersion `protobuf:"varint,1,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` +} + +func (x *SelfConfigSource) Reset() { + *x = SelfConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelfConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelfConfigSource) ProtoMessage() {} + +func (x *SelfConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelfConfigSource.ProtoReflect.Descriptor instead. +func (*SelfConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{2} +} + +func (x *SelfConfigSource) GetTransportApiVersion() ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return ApiVersion_AUTO +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. +type RateLimitSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + MaxTokens *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. The minimal fill rate is once per year. Lower + // fill rates will be set to once per year. + FillRate *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"` +} + +func (x *RateLimitSettings) Reset() { + *x = RateLimitSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitSettings) ProtoMessage() {} + +func (x *RateLimitSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitSettings.ProtoReflect.Descriptor instead. +func (*RateLimitSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{3} +} + +func (x *RateLimitSettings) GetMaxTokens() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxTokens + } + return nil +} + +func (x *RateLimitSettings) GetFillRate() *wrapperspb.DoubleValue { + if x != nil { + return x.FillRate + } + return nil +} + +// Local filesystem path configuration source. 
+type PathConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for a :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. + // This is because in general only moves are atomic. The same method of swapping files as is + // demonstrated in the :ref:`runtime documentation ` can be + // used here also. If ``watched_directory`` is configured, no watch will be placed directly on + // this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of + // this path. This is required in certain deployment scenarios. See below for more information. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // If configured, this directory will be watched for *moves*. When an entry in this directory is + // moved to, the “path“ will be reloaded. This is required in certain deployment scenarios. + // + // Specifically, if trying to load an xDS resource using a + // `Kubernetes ConfigMap `_, the + // following configuration might be used: + // 1. Store xds.yaml inside a ConfigMap. + // 2. Mount the ConfigMap to “/config_map/xds“ + // 3. Configure path “/config_map/xds/xds.yaml“ + // 4. Configure watched directory “/config_map/xds“ + // + // The above configuration will ensure that Envoy watches the owning directory for moves which is + // required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. + WatchedDirectory *WatchedDirectory `protobuf:"bytes,2,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` +} + +func (x *PathConfigSource) Reset() { + *x = PathConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathConfigSource) ProtoMessage() {} + +func (x *PathConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathConfigSource.ProtoReflect.Descriptor instead. +func (*PathConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{4} +} + +func (x *PathConfigSource) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *PathConfigSource) GetWatchedDirectory() *WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. 
+// [#next-free-field: 9] +type ConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Authorities that this config source may be used for. An authority specified in a xdstp:// URL + // is resolved to a “ConfigSource“ prior to configuration fetch. This field provides the + // association between authority name and configuration source. + // [#not-implemented-hide:] + Authorities []*v3.Authority `protobuf:"bytes,7,rep,name=authorities,proto3" json:"authorities,omitempty"` + // Types that are assignable to ConfigSourceSpecifier: + // + // *ConfigSource_Path + // *ConfigSource_PathConfigSource + // *ConfigSource_ApiConfigSource + // *ConfigSource_Ads + // *ConfigSource_Self + ConfigSourceSpecifier isConfigSource_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + InitialFetchTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"` + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ResourceApiVersion ApiVersion `protobuf:"varint,6,opt,name=resource_api_version,json=resourceApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"resource_api_version,omitempty"` +} + +func (x *ConfigSource) Reset() { + *x = ConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigSource) ProtoMessage() {} + +func (x *ConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigSource.ProtoReflect.Descriptor instead. +func (*ConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{5} +} + +func (x *ConfigSource) GetAuthorities() []*v3.Authority { + if x != nil { + return x.Authorities + } + return nil +} + +func (m *ConfigSource) GetConfigSourceSpecifier() isConfigSource_ConfigSourceSpecifier { + if m != nil { + return m.ConfigSourceSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. 
+func (x *ConfigSource) GetPath() string { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Path); ok { + return x.Path + } + return "" +} + +func (x *ConfigSource) GetPathConfigSource() *PathConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_PathConfigSource); ok { + return x.PathConfigSource + } + return nil +} + +func (x *ConfigSource) GetApiConfigSource() *ApiConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_ApiConfigSource); ok { + return x.ApiConfigSource + } + return nil +} + +func (x *ConfigSource) GetAds() *AggregatedConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Ads); ok { + return x.Ads + } + return nil +} + +func (x *ConfigSource) GetSelf() *SelfConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Self); ok { + return x.Self + } + return nil +} + +func (x *ConfigSource) GetInitialFetchTimeout() *durationpb.Duration { + if x != nil { + return x.InitialFetchTimeout + } + return nil +} + +func (x *ConfigSource) GetResourceApiVersion() ApiVersion { + if x != nil { + return x.ResourceApiVersion + } + return ApiVersion_AUTO +} + +type isConfigSource_ConfigSourceSpecifier interface { + isConfigSource_ConfigSourceSpecifier() +} + +type ConfigSource_Path struct { + // Deprecated in favor of “path_config_source“. Use that field instead. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"` +} + +type ConfigSource_PathConfigSource struct { + // Local filesystem path configuration source. + PathConfigSource *PathConfigSource `protobuf:"bytes,8,opt,name=path_config_source,json=pathConfigSource,proto3,oneof"` +} + +type ConfigSource_ApiConfigSource struct { + // API configuration source. + ApiConfigSource *ApiConfigSource `protobuf:"bytes,2,opt,name=api_config_source,json=apiConfigSource,proto3,oneof"` +} + +type ConfigSource_Ads struct { + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + Ads *AggregatedConfigSource `protobuf:"bytes,3,opt,name=ads,proto3,oneof"` +} + +type ConfigSource_Self struct { + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] 
+ Self *SelfConfigSource `protobuf:"bytes,5,opt,name=self,proto3,oneof"` +} + +func (*ConfigSource_Path) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_PathConfigSource) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_ApiConfigSource) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_Ads) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_Self) isConfigSource_ConfigSourceSpecifier() {} + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +type ExtensionConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigSource *ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // “apply_default_config_without_warming“ flag is set. + DefaultConfig *anypb.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + ApplyDefaultConfigWithoutWarming bool `protobuf:"varint,3,opt,name=apply_default_config_without_warming,json=applyDefaultConfigWithoutWarming,proto3" json:"apply_default_config_without_warming,omitempty"` + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. + TypeUrls []string `protobuf:"bytes,4,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"` +} + +func (x *ExtensionConfigSource) Reset() { + *x = ExtensionConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionConfigSource) ProtoMessage() {} + +func (x *ExtensionConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionConfigSource.ProtoReflect.Descriptor instead. 
+func (*ExtensionConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{6} +} + +func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +func (x *ExtensionConfigSource) GetDefaultConfig() *anypb.Any { + if x != nil { + return x.DefaultConfig + } + return nil +} + +func (x *ExtensionConfigSource) GetApplyDefaultConfigWithoutWarming() bool { + if x != nil { + return x.ApplyDefaultConfigWithoutWarming + } + return false +} + +func (x *ExtensionConfigSource) GetTypeUrls() []string { + if x != nil { + return x.TypeUrls + } + return nil +} + +var File_envoy_config_core_v3_config_source_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_config_source_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x06, 0x0a, 0x0f, 0x41, 0x70, 0x69, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x12, 0x52, 0x0a, 0x08, + 0x61, 0x70, 0x69, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x0c, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, + 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4c, 0x0a, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x72, + 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x11, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1e, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x65, + 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 
0x61, 0x67, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, + 0x22, 0x92, 0x01, 0x0a, 0x07, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x25, + 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, + 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, + 0x54, 0x45, 0x44, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x47, + 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x06, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0x49, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x53, + 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, + 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x11, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3b, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x49, 0x0a, + 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x61, 0x74, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0x8c, 0x05, 0x0a, 0x0c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x0b, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x56, 0x0a, 0x12, 0x70, 0x61, 0x74, + 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x10, 0x70, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x53, 0x0a, 0x11, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x61, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x48, 0x00, 0x52, 0x03, 0x61, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, + 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x4d, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x5c, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x9e, 0x02, 0x0a, 0x15, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x24, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x44, 0x65, 0x66, 
0x61, 0x75, 0x6c, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x57, 0x61, 0x72, + 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x2a, 0x33, 0x0a, 0x0a, 0x41, + 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, + 0x4f, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x8a, 0xf4, 0x9b, + 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x08, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, + 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_config_source_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_config_source_proto_rawDescData = file_envoy_config_core_v3_config_source_proto_rawDesc +) + +func file_envoy_config_core_v3_config_source_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_config_source_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_config_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_config_source_proto_rawDescData) + }) + return file_envoy_config_core_v3_config_source_proto_rawDescData +} + +var file_envoy_config_core_v3_config_source_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_core_v3_config_source_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{ + (ApiVersion)(0), // 0: envoy.config.core.v3.ApiVersion + (ApiConfigSource_ApiType)(0), // 1: envoy.config.core.v3.ApiConfigSource.ApiType + (*ApiConfigSource)(nil), // 2: envoy.config.core.v3.ApiConfigSource + (*AggregatedConfigSource)(nil), // 3: envoy.config.core.v3.AggregatedConfigSource + (*SelfConfigSource)(nil), // 4: envoy.config.core.v3.SelfConfigSource + (*RateLimitSettings)(nil), // 5: envoy.config.core.v3.RateLimitSettings + (*PathConfigSource)(nil), // 6: envoy.config.core.v3.PathConfigSource + (*ConfigSource)(nil), // 7: envoy.config.core.v3.ConfigSource + (*ExtensionConfigSource)(nil), // 8: envoy.config.core.v3.ExtensionConfigSource + (*GrpcService)(nil), // 9: envoy.config.core.v3.GrpcService + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*TypedExtensionConfig)(nil), // 11: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.UInt32Value)(nil), // 12: google.protobuf.UInt32Value + (*wrapperspb.DoubleValue)(nil), // 13: google.protobuf.DoubleValue + (*WatchedDirectory)(nil), // 14: envoy.config.core.v3.WatchedDirectory + (*v3.Authority)(nil), // 15: 
xds.core.v3.Authority + (*anypb.Any)(nil), // 16: google.protobuf.Any +} +var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType + 0, // 1: envoy.config.core.v3.ApiConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 9, // 2: envoy.config.core.v3.ApiConfigSource.grpc_services:type_name -> envoy.config.core.v3.GrpcService + 10, // 3: envoy.config.core.v3.ApiConfigSource.refresh_delay:type_name -> google.protobuf.Duration + 10, // 4: envoy.config.core.v3.ApiConfigSource.request_timeout:type_name -> google.protobuf.Duration + 5, // 5: envoy.config.core.v3.ApiConfigSource.rate_limit_settings:type_name -> envoy.config.core.v3.RateLimitSettings + 11, // 6: envoy.config.core.v3.ApiConfigSource.config_validators:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 7: envoy.config.core.v3.SelfConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 12, // 8: envoy.config.core.v3.RateLimitSettings.max_tokens:type_name -> google.protobuf.UInt32Value + 13, // 9: envoy.config.core.v3.RateLimitSettings.fill_rate:type_name -> google.protobuf.DoubleValue + 14, // 10: envoy.config.core.v3.PathConfigSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 15, // 11: envoy.config.core.v3.ConfigSource.authorities:type_name -> xds.core.v3.Authority + 6, // 12: envoy.config.core.v3.ConfigSource.path_config_source:type_name -> envoy.config.core.v3.PathConfigSource + 2, // 13: envoy.config.core.v3.ConfigSource.api_config_source:type_name -> envoy.config.core.v3.ApiConfigSource + 3, // 14: envoy.config.core.v3.ConfigSource.ads:type_name -> envoy.config.core.v3.AggregatedConfigSource + 4, // 15: envoy.config.core.v3.ConfigSource.self:type_name -> envoy.config.core.v3.SelfConfigSource + 10, // 16: envoy.config.core.v3.ConfigSource.initial_fetch_timeout:type_name -> google.protobuf.Duration + 0, // 17: envoy.config.core.v3.ConfigSource.resource_api_version:type_name -> envoy.config.core.v3.ApiVersion + 7, // 18: envoy.config.core.v3.ExtensionConfigSource.config_source:type_name -> envoy.config.core.v3.ConfigSource + 16, // 19: envoy.config.core.v3.ExtensionConfigSource.default_config:type_name -> google.protobuf.Any + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_config_source_proto_init() } +func file_envoy_config_core_v3_config_source_proto_init() { + if File_envoy_config_core_v3_config_source_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_grpc_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_config_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AggregatedConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelfConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*ConfigSource_Path)(nil), + (*ConfigSource_PathConfigSource)(nil), + (*ConfigSource_ApiConfigSource)(nil), + (*ConfigSource_Ads)(nil), + (*ConfigSource_Self)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_config_source_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_config_source_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_config_source_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_config_source_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_config_source_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_config_source_proto = out.File + file_envoy_config_core_v3_config_source_proto_rawDesc = nil + file_envoy_config_core_v3_config_source_proto_goTypes = nil + file_envoy_config_core_v3_config_source_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go new file mode 100644 index 000000000..c2d0b1595 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go @@ -0,0 +1,1345 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ApiConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ApiConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApiConfigSourceMultiError, or nil if none found. +func (m *ApiConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ApiConfigSource_ApiType_name[int32(m.GetApiType())]; !ok { + err := ApiConfigSourceValidationError{ + field: "ApiType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := ApiConfigSourceValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetGrpcServices() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRefreshDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRefreshDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if d := m.GetRequestTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ApiConfigSourceValidationError{ + field: "RequestTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ApiConfigSourceValidationError{ + field: "RequestTimeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetRateLimitSettings()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRateLimitSettings()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SetNodeOnFirstMessageOnly + + for idx, item := range m.GetConfigValidators() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ApiConfigSourceMultiError(errors) + } + + return nil +} + +// ApiConfigSourceMultiError is an error wrapping multiple validation errors +// returned by ApiConfigSource.ValidateAll() if the designated constraints +// aren't met. +type ApiConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiConfigSourceMultiError) AllErrors() []error { return m } + +// ApiConfigSourceValidationError is the validation error returned by +// ApiConfigSource.Validate if the designated constraints aren't met. +type ApiConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ApiConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ApiConfigSourceValidationError) ErrorName() string { return "ApiConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e ApiConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiConfigSourceValidationError{} + +// Validate checks the field values on AggregatedConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AggregatedConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AggregatedConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AggregatedConfigSourceMultiError, or nil if none found. +func (m *AggregatedConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *AggregatedConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return AggregatedConfigSourceMultiError(errors) + } + + return nil +} + +// AggregatedConfigSourceMultiError is an error wrapping multiple validation +// errors returned by AggregatedConfigSource.ValidateAll() if the designated +// constraints aren't met. +type AggregatedConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AggregatedConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AggregatedConfigSourceMultiError) AllErrors() []error { return m } + +// AggregatedConfigSourceValidationError is the validation error returned by +// AggregatedConfigSource.Validate if the designated constraints aren't met. +type AggregatedConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AggregatedConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AggregatedConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AggregatedConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AggregatedConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AggregatedConfigSourceValidationError) ErrorName() string { + return "AggregatedConfigSourceValidationError" +} + +// Error satisfies the builtin error interface +func (e AggregatedConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAggregatedConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AggregatedConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AggregatedConfigSourceValidationError{} + +// Validate checks the field values on SelfConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SelfConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SelfConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SelfConfigSourceMultiError, or nil if none found. +func (m *SelfConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *SelfConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := SelfConfigSourceValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SelfConfigSourceMultiError(errors) + } + + return nil +} + +// SelfConfigSourceMultiError is an error wrapping multiple validation errors +// returned by SelfConfigSource.ValidateAll() if the designated constraints +// aren't met. +type SelfConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SelfConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SelfConfigSourceMultiError) AllErrors() []error { return m } + +// SelfConfigSourceValidationError is the validation error returned by +// SelfConfigSource.Validate if the designated constraints aren't met. +type SelfConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SelfConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SelfConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SelfConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SelfConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SelfConfigSourceValidationError) ErrorName() string { return "SelfConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e SelfConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSelfConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SelfConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SelfConfigSourceValidationError{} + +// Validate checks the field values on RateLimitSettings with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimitSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimitSettingsMultiError, or nil if none found. +func (m *RateLimitSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxTokens()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxTokens()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetFillRate(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := RateLimitSettingsValidationError{ + field: "FillRate", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return RateLimitSettingsMultiError(errors) + } + + return nil +} + +// RateLimitSettingsMultiError is an error wrapping multiple validation errors +// returned by RateLimitSettings.ValidateAll() if the designated constraints +// aren't met. +type RateLimitSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitSettingsMultiError) AllErrors() []error { return m } + +// RateLimitSettingsValidationError is the validation error returned by +// RateLimitSettings.Validate if the designated constraints aren't met. +type RateLimitSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RateLimitSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitSettingsValidationError) ErrorName() string { + return "RateLimitSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitSettingsValidationError{} + +// Validate checks the field values on PathConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PathConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PathConfigSourceMultiError, or nil if none found. +func (m *PathConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *PathConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := PathConfigSourceValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return PathConfigSourceMultiError(errors) + } + + return nil +} + +// PathConfigSourceMultiError is an error wrapping multiple validation errors +// returned by PathConfigSource.ValidateAll() if the designated constraints +// aren't met. +type PathConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PathConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathConfigSourceMultiError) AllErrors() []error { return m } + +// PathConfigSourceValidationError is the validation error returned by +// PathConfigSource.Validate if the designated constraints aren't met. +type PathConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathConfigSourceValidationError) ErrorName() string { return "PathConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e PathConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathConfigSourceValidationError{} + +// Validate checks the field values on ConfigSource with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ConfigSourceMultiError, or +// nil if none found. 
+func (m *ConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAuthorities() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInitialFetchTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInitialFetchTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := ApiVersion_name[int32(m.GetResourceApiVersion())]; !ok { + err := ConfigSourceValidationError{ + field: "ResourceApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { + case *ConfigSource_Path: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + // no validation rules for Path + case *ConfigSource_PathConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetPathConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_ApiConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetApiConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_Ads: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_Self: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSelf()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSelf()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + 
errors = append(errors, err) + } + + if len(errors) > 0 { + return ConfigSourceMultiError(errors) + } + + return nil +} + +// ConfigSourceMultiError is an error wrapping multiple validation errors +// returned by ConfigSource.ValidateAll() if the designated constraints aren't met. +type ConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConfigSourceMultiError) AllErrors() []error { return m } + +// ConfigSourceValidationError is the validation error returned by +// ConfigSource.Validate if the designated constraints aren't met. +type ConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConfigSourceValidationError) ErrorName() string { return "ConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e ConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConfigSourceValidationError{} + +// Validate checks the field values on ExtensionConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExtensionConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtensionConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtensionConfigSourceMultiError, or nil if none found. 
+func (m *ExtensionConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtensionConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := ExtensionConfigSourceValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetConfigSource(); a != nil { + + } + + if all { + switch v := interface{}(m.GetDefaultConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ApplyDefaultConfigWithoutWarming + + if len(m.GetTypeUrls()) < 1 { + err := ExtensionConfigSourceValidationError{ + field: "TypeUrls", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ExtensionConfigSourceMultiError(errors) + } + + return nil +} + +// ExtensionConfigSourceMultiError is an error wrapping multiple validation +// errors returned by ExtensionConfigSource.ValidateAll() if the designated +// constraints aren't met. +type ExtensionConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionConfigSourceMultiError) AllErrors() []error { return m } + +// ExtensionConfigSourceValidationError is the validation error returned by +// ExtensionConfigSource.Validate if the designated constraints aren't met. +type ExtensionConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtensionConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtensionConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtensionConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtensionConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ExtensionConfigSourceValidationError) ErrorName() string { + return "ExtensionConfigSourceValidationError" +} + +// Error satisfies the builtin error interface +func (e ExtensionConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtensionConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtensionConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtensionConfigSourceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go new file mode 100644 index 000000000..ae2fd091d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go @@ -0,0 +1,831 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ConfigValidators) > 0 { + for iNdEx := len(m.ConfigValidators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ConfigValidators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x40 + } + if m.SetNodeOnFirstMessageOnly { + i-- + if m.SetNodeOnFirstMessageOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RateLimitSettings != nil { + size, err := m.RateLimitSettings.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.GrpcServices) > 0 { + for iNdEx := len(m.GrpcServices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GrpcServices[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.RefreshDelay != nil { + size, err := (*durationpb.Duration)(m.RefreshDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ClusterNames) > 0 { + for iNdEx := len(m.ClusterNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterNames[iNdEx]) + copy(dAtA[i:], m.ClusterNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.ApiType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ApiType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AggregatedConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregatedConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AggregatedConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return 
len(dAtA) - i, nil +} + +func (m *SelfConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SelfConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillRate != nil { + size, err := (*wrapperspb.DoubleValue)(m.FillRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxTokens).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PathConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSource) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_PathConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Authorities) > 0 { + for iNdEx := len(m.Authorities) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Authorities[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Authorities[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ResourceApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResourceApiVersion)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Self); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InitialFetchTimeout != nil { + size, err := (*durationpb.Duration)(m.InitialFetchTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Ads); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_ApiConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ConfigSource_ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApiConfigSource != nil { + size, err := m.ApiConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Ads) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} 
+ +func (m *ConfigSource_Ads) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ads != nil { + size, err := m.Ads.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Self) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Self) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Self != nil { + size, err := m.Self.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathConfigSource != nil { + size, err := m.PathConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ExtensionConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.ApplyDefaultConfigWithoutWarming { + i-- + if m.ApplyDefaultConfigWithoutWarming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.DefaultConfig != nil { + size, err := (*anypb.Any)(m.DefaultConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + size, err := m.ConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ApiType)) + } + if len(m.ClusterNames) > 0 { + for _, s := range m.ClusterNames { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RefreshDelay != nil { + l = (*durationpb.Duration)(m.RefreshDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.GrpcServices) > 0 { + for _, e := range m.GrpcServices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitSettings != nil { + l = m.RateLimitSettings.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetNodeOnFirstMessageOnly { + n += 2 + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if len(m.ConfigValidators) > 0 { + for _, e := range m.ConfigValidators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AggregatedConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SelfConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != nil { + l = (*wrapperspb.UInt32Value)(m.MaxTokens).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillRate != nil { + l = (*wrapperspb.DoubleValue)(m.FillRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InitialFetchTimeout != nil { + l = (*durationpb.Duration)(m.InitialFetchTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResourceApiVersion)) + } + if len(m.Authorities) > 0 { + for _, e := range m.Authorities { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ConfigSource_ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiConfigSource != nil { + l = m.ApiConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_Ads) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ads != nil { + l = m.Ads.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ConfigSource_Self) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Self != nil { + l = m.Self.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathConfigSource != nil { + l = m.PathConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ExtensionConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + l = m.ConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultConfig != nil { + l = (*anypb.Any)(m.DefaultConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplyDefaultConfigWithoutWarming { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go new file mode 100644 index 000000000..8d995326c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +type EventServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConfigSourceSpecifier: + // + // *EventServiceConfig_GrpcService + ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` +} + +func (x *EventServiceConfig) Reset() { + *x = EventServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventServiceConfig) ProtoMessage() {} + +func (x *EventServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead. 
+func (*EventServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP(), []int{0} +} + +func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier { + if m != nil { + return m.ConfigSourceSpecifier + } + return nil +} + +func (x *EventServiceConfig) GetGrpcService() *GrpcService { + if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok { + return x.GrpcService + } + return nil +} + +type isEventServiceConfig_ConfigSourceSpecifier interface { + isEventServiceConfig_ConfigSourceSpecifier() +} + +type EventServiceConfig_GrpcService struct { + // Specifies the gRPC service that hosts the event reporting service. + GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"` +} + +func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {} + +var File_envoy_config_core_v3_event_service_config_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_event_service_config_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x12, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x8b, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 
0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_event_service_config_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_event_service_config_proto_rawDescData = file_envoy_config_core_v3_event_service_config_proto_rawDesc +) + +func file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_event_service_config_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_event_service_config_proto_rawDescData) + }) + return file_envoy_config_core_v3_event_service_config_proto_rawDescData +} + +var file_envoy_config_core_v3_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_event_service_config_proto_goTypes = []interface{}{ + (*EventServiceConfig)(nil), // 0: envoy.config.core.v3.EventServiceConfig + (*GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService +} +var file_envoy_config_core_v3_event_service_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.EventServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_event_service_config_proto_init() } +func file_envoy_config_core_v3_event_service_config_proto_init() { + if File_envoy_config_core_v3_event_service_config_proto != nil { + return + } + file_envoy_config_core_v3_grpc_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*EventServiceConfig_GrpcService)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_event_service_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_event_service_config_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_event_service_config_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_event_service_config_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_event_service_config_proto = out.File + 
file_envoy_config_core_v3_event_service_config_proto_rawDesc = nil + file_envoy_config_core_v3_event_service_config_proto_goTypes = nil + file_envoy_config_core_v3_event_service_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go new file mode 100644 index 000000000..21f29c8b6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go @@ -0,0 +1,197 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on EventServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EventServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EventServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EventServiceConfigMultiError, or nil if none found. 
+func (m *EventServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EventServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { + case *EventServiceConfig_GrpcService: + if v == nil { + err := EventServiceConfigValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { + err := EventServiceConfigValidationError{ + field: "ConfigSourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return EventServiceConfigMultiError(errors) + } + + return nil +} + +// EventServiceConfigMultiError is an error wrapping multiple validation errors +// returned by EventServiceConfig.ValidateAll() if the designated constraints +// aren't met. +type EventServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EventServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EventServiceConfigMultiError) AllErrors() []error { return m } + +// EventServiceConfigValidationError is the validation error returned by +// EventServiceConfig.Validate if the designated constraints aren't met. +type EventServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EventServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EventServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EventServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EventServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EventServiceConfigValidationError) ErrorName() string { + return "EventServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EventServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEventServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EventServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EventServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go new file mode 100644 index 000000000..c8e65c66f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *EventServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*EventServiceConfig_GrpcService); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EventServiceConfig_GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig_GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcService != nil { + size, err := m.GrpcService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *EventServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *EventServiceConfig_GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + if m.GrpcService != nil { + l = m.GrpcService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go new file mode 100644 index 000000000..81ec41a6e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go @@ -0,0 +1,185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +type TypedExtensionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is “xds.type.v3.TypedStruct“ + // (or, for historical reasons, “udpa.type.v1.TypedStruct“), the inner type + // URL of “TypedStruct“ will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *TypedExtensionConfig) Reset() { + *x = TypedExtensionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedExtensionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedExtensionConfig) ProtoMessage() {} + +func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead. 
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedExtensionConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +var File_envoy_config_core_v3_extension_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_extension_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x76, 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_extension_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_extension_proto_rawDescData = file_envoy_config_core_v3_extension_proto_rawDesc +) + +func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_extension_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_extension_proto_rawDescData) + }) + return file_envoy_config_core_v3_extension_proto_rawDescData +} + +var 
file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{ + (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_extension_proto_init() } +func file_envoy_config_core_v3_extension_proto_init() { + if File_envoy_config_core_v3_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedExtensionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_extension_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_extension_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_extension_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_extension_proto = out.File + file_envoy_config_core_v3_extension_proto_rawDesc = nil + file_envoy_config_core_v3_extension_proto_goTypes = nil + file_envoy_config_core_v3_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go new file mode 100644 index 000000000..c0df5946a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go @@ -0,0 +1,165 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TypedExtensionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TypedExtensionConfigMultiError, or nil if none found. 
+func (m *TypedExtensionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedExtensionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TypedExtensionConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTypedConfig() == nil { + err := TypedExtensionConfigValidationError{ + field: "TypedConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetTypedConfig(); a != nil { + + } + + if len(errors) > 0 { + return TypedExtensionConfigMultiError(errors) + } + + return nil +} + +// TypedExtensionConfigMultiError is an error wrapping multiple validation +// errors returned by TypedExtensionConfig.ValidateAll() if the designated +// constraints aren't met. +type TypedExtensionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedExtensionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedExtensionConfigMultiError) AllErrors() []error { return m } + +// TypedExtensionConfigValidationError is the validation error returned by +// TypedExtensionConfig.Validate if the designated constraints aren't met. +type TypedExtensionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedExtensionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedExtensionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedExtensionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedExtensionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedExtensionConfigValidationError) ErrorName() string { + return "TypedExtensionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TypedExtensionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedExtensionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedExtensionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedExtensionConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go new file mode 100644 index 000000000..f81b8b38b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TypedExtensionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedExtensionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TypedExtensionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TypedExtensionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go new file mode 100644 index 000000000..199ac40f0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go @@ -0,0 +1,246 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A list of gRPC methods which can be used as an allowlist, for example. 
+type GrpcMethodList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Services []*GrpcMethodList_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` +} + +func (x *GrpcMethodList) Reset() { + *x = GrpcMethodList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcMethodList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcMethodList) ProtoMessage() {} + +func (x *GrpcMethodList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcMethodList.ProtoReflect.Descriptor instead. +func (*GrpcMethodList) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcMethodList) GetServices() []*GrpcMethodList_Service { + if x != nil { + return x.Services + } + return nil +} + +type GrpcMethodList_Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the gRPC service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The names of the gRPC methods in this service. + MethodNames []string `protobuf:"bytes,2,rep,name=method_names,json=methodNames,proto3" json:"method_names,omitempty"` +} + +func (x *GrpcMethodList_Service) Reset() { + *x = GrpcMethodList_Service{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcMethodList_Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcMethodList_Service) ProtoMessage() {} + +func (x *GrpcMethodList_Service) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcMethodList_Service.ProtoReflect.Descriptor instead. 
+func (*GrpcMethodList_Service) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GrpcMethodList_Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GrpcMethodList_Service) GetMethodNames() []string { + if x != nil { + return x.MethodNames + } + return nil +} + +var File_envoy_config_core_v3_grpc_method_list_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, + 0x02, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x48, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x87, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x47, 0x72, 0x70, 0x63, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 
0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = file_envoy_config_core_v3_grpc_method_list_proto_rawDesc +) + +func file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_method_list_proto_rawDescData) + }) + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescData +} + +var file_envoy_config_core_v3_grpc_method_list_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_grpc_method_list_proto_goTypes = []interface{}{ + (*GrpcMethodList)(nil), // 0: envoy.config.core.v3.GrpcMethodList + (*GrpcMethodList_Service)(nil), // 1: envoy.config.core.v3.GrpcMethodList.Service +} +var file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.GrpcMethodList.services:type_name -> envoy.config.core.v3.GrpcMethodList.Service + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_grpc_method_list_proto_init() } +func file_envoy_config_core_v3_grpc_method_list_proto_init() { + if File_envoy_config_core_v3_grpc_method_list_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcMethodList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcMethodList_Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_grpc_method_list_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_grpc_method_list_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_grpc_method_list_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_grpc_method_list_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_grpc_method_list_proto = out.File + file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = nil + file_envoy_config_core_v3_grpc_method_list_proto_goTypes = nil + file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go new file mode 100644 index 000000000..994cc4df7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go @@ -0,0 +1,295 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GrpcMethodList with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcMethodList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcMethodList with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GrpcMethodListMultiError, +// or nil if none found. +func (m *GrpcMethodList) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcMethodList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetServices() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return GrpcMethodListMultiError(errors) + } + + return nil +} + +// GrpcMethodListMultiError is an error wrapping multiple validation errors +// returned by GrpcMethodList.ValidateAll() if the designated constraints +// aren't met. +type GrpcMethodListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcMethodListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcMethodListMultiError) AllErrors() []error { return m } + +// GrpcMethodListValidationError is the validation error returned by +// GrpcMethodList.Validate if the designated constraints aren't met. 
+type GrpcMethodListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcMethodListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcMethodListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcMethodListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcMethodListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcMethodListValidationError) ErrorName() string { return "GrpcMethodListValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcMethodListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcMethodList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcMethodListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcMethodListValidationError{} + +// Validate checks the field values on GrpcMethodList_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcMethodList_Service) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcMethodList_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcMethodList_ServiceMultiError, or nil if none found. +func (m *GrpcMethodList_Service) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcMethodList_Service) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := GrpcMethodList_ServiceValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetMethodNames()) < 1 { + err := GrpcMethodList_ServiceValidationError{ + field: "MethodNames", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcMethodList_ServiceMultiError(errors) + } + + return nil +} + +// GrpcMethodList_ServiceMultiError is an error wrapping multiple validation +// errors returned by GrpcMethodList_Service.ValidateAll() if the designated +// constraints aren't met. +type GrpcMethodList_ServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcMethodList_ServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcMethodList_ServiceMultiError) AllErrors() []error { return m } + +// GrpcMethodList_ServiceValidationError is the validation error returned by +// GrpcMethodList_Service.Validate if the designated constraints aren't met. +type GrpcMethodList_ServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GrpcMethodList_ServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcMethodList_ServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcMethodList_ServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcMethodList_ServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcMethodList_ServiceValidationError) ErrorName() string { + return "GrpcMethodList_ServiceValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcMethodList_ServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcMethodList_Service.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcMethodList_ServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcMethodList_ServiceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go new file mode 100644 index 000000000..940531d1d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcMethodList_Service) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList_Service) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList_Service) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MethodNames) > 0 { + for iNdEx := len(m.MethodNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MethodNames[iNdEx]) + copy(dAtA[i:], m.MethodNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MethodNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Services) > 0 { + for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Services[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList_Service) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.MethodNames) > 0 { + for _, s := range m.MethodNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcMethodList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go new file mode 100644 index 000000000..3967277f6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go @@ -0,0 +1,1814 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/grpc_service.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +// [#next-free-field: 7] +type GrpcService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TargetSpecifier: + // + // *GrpcService_EnvoyGrpc_ + // *GrpcService_GoogleGrpc_ + TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"` + // The timeout for the gRPC request. This is the timeout for a specific + // request. + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. “x-foo-bar: baz-key“) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. + InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` + // Optional default retry policy for streams toward the service. + // If an async stream doesn't have retry policy configured in its stream options, this retry policy is used. + RetryPolicy *RetryPolicy `protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` +} + +func (x *GrpcService) Reset() { + *x = GrpcService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService) ProtoMessage() {} + +func (x *GrpcService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService.ProtoReflect.Descriptor instead. 
+func (*GrpcService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (m *GrpcService) GetTargetSpecifier() isGrpcService_TargetSpecifier { + if m != nil { + return m.TargetSpecifier + } + return nil +} + +func (x *GrpcService) GetEnvoyGrpc() *GrpcService_EnvoyGrpc { + if x, ok := x.GetTargetSpecifier().(*GrpcService_EnvoyGrpc_); ok { + return x.EnvoyGrpc + } + return nil +} + +func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc { + if x, ok := x.GetTargetSpecifier().(*GrpcService_GoogleGrpc_); ok { + return x.GoogleGrpc + } + return nil +} + +func (x *GrpcService) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *GrpcService) GetInitialMetadata() []*HeaderValue { + if x != nil { + return x.InitialMetadata + } + return nil +} + +func (x *GrpcService) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +type isGrpcService_TargetSpecifier interface { + isGrpcService_TargetSpecifier() +} + +type GrpcService_EnvoyGrpc_ struct { + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc *GrpcService_EnvoyGrpc `protobuf:"bytes,1,opt,name=envoy_grpc,json=envoyGrpc,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ struct { + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc *GrpcService_GoogleGrpc `protobuf:"bytes,2,opt,name=google_grpc,json=googleGrpc,proto3,oneof"` +} + +func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {} + +func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {} + +// [#next-free-field: 6] +type GrpcService_EnvoyGrpc struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`transport_socket + // `. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The “:authority“ header in the grpc request. If this field is not set, the authority header value will be “cluster_name“. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // Indicates the retry policy for re-establishing the gRPC stream + // This field is optional. If max interval is not provided, it will be set to ten times the provided base interval. + // Currently only supported for xDS gRPC streams. + // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. + RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // Maximum gRPC message size that is allowed to be received. + // If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error. + // This limit is applied to individual messages in the streaming response and not the total size of streaming response. + // Defaults to 0, which means unlimited. 
+ MaxReceiveMessageLength *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_receive_message_length,json=maxReceiveMessageLength,proto3" json:"max_receive_message_length,omitempty"` + // This provides gRPC client level control over envoy generated headers. + // If false, the header will be sent but it can be overridden by per stream option. + // If true, the header will be removed and can not be overridden by per stream option. + // Default to false. + SkipEnvoyHeaders bool `protobuf:"varint,5,opt,name=skip_envoy_headers,json=skipEnvoyHeaders,proto3" json:"skip_envoy_headers,omitempty"` +} + +func (x *GrpcService_EnvoyGrpc) Reset() { + *x = GrpcService_EnvoyGrpc{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_EnvoyGrpc) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_EnvoyGrpc) ProtoMessage() {} + +func (x *GrpcService_EnvoyGrpc) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_EnvoyGrpc.ProtoReflect.Descriptor instead. +func (*GrpcService_EnvoyGrpc) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GrpcService_EnvoyGrpc) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *GrpcService_EnvoyGrpc) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *GrpcService_EnvoyGrpc) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *GrpcService_EnvoyGrpc) GetMaxReceiveMessageLength() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxReceiveMessageLength + } + return nil +} + +func (x *GrpcService_EnvoyGrpc) GetSkipEnvoyHeaders() bool { + if x != nil { + return x.SkipEnvoyHeaders + } + return false +} + +// [#next-free-field: 9] +type GrpcService_GoogleGrpc struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + TargetUri string `protobuf:"bytes,1,opt,name=target_uri,json=targetUri,proto3" json:"target_uri,omitempty"` + ChannelCredentials *GrpcService_GoogleGrpc_ChannelCredentials `protobuf:"bytes,2,opt,name=channel_credentials,json=channelCredentials,proto3" json:"channel_credentials,omitempty"` + // A set of call credentials that can be composed with `channel credentials + // `_. + CallCredentials []*GrpcService_GoogleGrpc_CallCredentials `protobuf:"bytes,3,rep,name=call_credentials,json=callCredentials,proto3" json:"call_credentials,omitempty"` + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. 
csv-table:: + // + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + StatPrefix string `protobuf:"bytes,4,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"` + // Additional configuration for site-specific customizations of the Google + // gRPC library. + Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + PerStreamBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"` + // Custom channels args. + ChannelArgs *GrpcService_GoogleGrpc_ChannelArgs `protobuf:"bytes,8,opt,name=channel_args,json=channelArgs,proto3" json:"channel_args,omitempty"` +} + +func (x *GrpcService_GoogleGrpc) Reset() { + *x = GrpcService_GoogleGrpc{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *GrpcService_GoogleGrpc) GetTargetUri() string { + if x != nil { + return x.TargetUri + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetChannelCredentials() *GrpcService_GoogleGrpc_ChannelCredentials { + if x != nil { + return x.ChannelCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetCallCredentials() []*GrpcService_GoogleGrpc_CallCredentials { + if x != nil { + return x.CallCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string { + if x != nil { + return x.CredentialsFactoryName + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerStreamBufferLimitBytes + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetChannelArgs() *GrpcService_GoogleGrpc_ChannelArgs { + if x != nil { + return x.ChannelArgs + } + return nil +} + +// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. +type GrpcService_GoogleGrpc_SslCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PEM encoded server root certificates. + RootCerts *DataSource `protobuf:"bytes,1,opt,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"` + // PEM encoded client private key. + PrivateKey *DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // PEM encoded client certificate chain. + CertChain *DataSource `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) Reset() { + *x = GrpcService_GoogleGrpc_SslCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_SslCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_SslCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_SslCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_SslCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetRootCerts() *DataSource { + if x != nil { + return x.RootCerts + } + return nil +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetPrivateKey() *DataSource { + if x != nil { + return x.PrivateKey + } + return nil +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetCertChain() *DataSource { + if x != nil { + return x.CertChain + } + return nil +} + +// Local channel credentials. 
Only UDS is supported for now. +// See https://github.com/grpc/grpc/pull/15909. +type GrpcService_GoogleGrpc_GoogleLocalCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) Reset() { + *x = GrpcService_GoogleGrpc_GoogleLocalCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_GoogleLocalCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 1} +} + +// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call +// credential types. +type GrpcService_GoogleGrpc_ChannelCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to CredentialSpecifier: + // + // *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials + // *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault + // *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials + CredentialSpecifier isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) Reset() { + *x = GrpcService_GoogleGrpc_ChannelCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_ChannelCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier { + if m != nil { + return m.CredentialSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetSslCredentials() *GrpcService_GoogleGrpc_SslCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { + return x.SslCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetGoogleDefault() *emptypb.Empty { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { + return x.GoogleDefault + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetLocalCredentials() *GrpcService_GoogleGrpc_GoogleLocalCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { + return x.LocalCredentials + } + return nil +} + +type isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier interface { + isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() +} + +type GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials struct { + SslCredentials *GrpcService_GoogleGrpc_SslCredentials `protobuf:"bytes,1,opt,name=ssl_credentials,json=sslCredentials,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault struct { + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + GoogleDefault *emptypb.Empty `protobuf:"bytes,2,opt,name=google_default,json=googleDefault,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials struct { + LocalCredentials *GrpcService_GoogleGrpc_GoogleLocalCredentials `protobuf:"bytes,3,opt,name=local_credentials,json=localCredentials,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +// [#next-free-field: 8] +type GrpcService_GoogleGrpc_CallCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to CredentialSpecifier: + // + // *GrpcService_GoogleGrpc_CallCredentials_AccessToken + // *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine + // *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken + // *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess + // *GrpcService_GoogleGrpc_CallCredentials_GoogleIam + // *GrpcService_GoogleGrpc_CallCredentials_FromPlugin + // *GrpcService_GoogleGrpc_CallCredentials_StsService_ + CredentialSpecifier isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
+func (x *GrpcService_GoogleGrpc_CallCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_CallCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3} +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier { + if m != nil { + return m.CredentialSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetAccessToken() string { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { + return x.AccessToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleComputeEngine() *emptypb.Empty { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { + return x.GoogleComputeEngine + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleRefreshToken() string { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { + return x.GoogleRefreshToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetServiceAccountJwtAccess() *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { + return x.ServiceAccountJwtAccess + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleIam() *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { + return x.GoogleIam + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetFromPlugin() *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { + return x.FromPlugin + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetStsService() *GrpcService_GoogleGrpc_CallCredentials_StsService { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { + return x.StsService + } + return nil +} + +type isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier interface { + isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() +} + +type GrpcService_GoogleGrpc_CallCredentials_AccessToken struct { + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine struct { + // Google Compute Engine credentials. 
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + GoogleComputeEngine *emptypb.Empty `protobuf:"bytes,2,opt,name=google_compute_engine,json=googleComputeEngine,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken struct { + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + GoogleRefreshToken string `protobuf:"bytes,3,opt,name=google_refresh_token,json=googleRefreshToken,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess struct { + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJwtAccess *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials `protobuf:"bytes,4,opt,name=service_account_jwt_access,json=serviceAccountJwtAccess,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleIam struct { + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIam *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials `protobuf:"bytes,5,opt,name=google_iam,json=googleIam,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_FromPlugin struct { + // Custom authenticator credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + FromPlugin *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin `protobuf:"bytes,6,opt,name=from_plugin,json=fromPlugin,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_StsService_ struct { + // Custom security token service which implements OAuth 2.0 token exchange. + // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. + StsService *GrpcService_GoogleGrpc_CallCredentials_StsService `protobuf:"bytes,7,opt,name=sts_service,json=stsService,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_CallCredentials_AccessToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_StsService_) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +// Channel arguments. +type GrpcService_GoogleGrpc_ChannelArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See grpc_types.h GRPC_ARG #defines for keys that work here. 
+ Args map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) Reset() { + *x = GrpcService_GoogleGrpc_ChannelArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelArgs) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_ChannelArgs) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4} +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) GetArgs() map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value { + if x != nil { + return x.Args + } + return nil +} + +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + JsonKey string `protobuf:"bytes,1,opt,name=json_key,json=jsonKey,proto3" json:"json_key,omitempty"` + TokenLifetimeSeconds uint64 `protobuf:"varint,2,opt,name=token_lifetime_seconds,json=tokenLifetimeSeconds,proto3" json:"token_lifetime_seconds,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 0} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetJsonKey() string { + if x != nil { + return x.JsonKey + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetTokenLifetimeSeconds() uint64 { + if x != nil { + return x.TokenLifetimeSeconds + } + return 0 +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthorizationToken string `protobuf:"bytes,1,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` + AuthoritySelector string `protobuf:"bytes,2,opt,name=authority_selector,json=authoritySelector,proto3" json:"authority_selector,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 1} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthorizationToken() string { + if x != nil { + return x.AuthorizationToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthoritySelector() string { + if x != nil { + return x.AuthoritySelector + } + return "" +} + +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.grpc_credentials] + // + // Types that are assignable to ConfigType: + // + // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig + ConfigType isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 2} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfigType() isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType interface { + isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() +} + +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { +} + +// Security token service configuration that allows Google gRPC to +// fetch security token from an OAuth 2.0 authorization server. +// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and +// https://github.com/grpc/grpc/pull/19587. +// [#next-free-field: 10] +type GrpcService_GoogleGrpc_CallCredentials_StsService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/bufbuild/protoc-gen-validate/issues/303] + TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"` + // Location of the target service or resource where the client + // intends to use the requested security token. + Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Logical name of the target service where the client intends to + // use the requested security token. + Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"` + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + Scope string `protobuf:"bytes,4,opt,name=scope,proto3" json:"scope,omitempty"` + // Type of the requested security token. + RequestedTokenType string `protobuf:"bytes,5,opt,name=requested_token_type,json=requestedTokenType,proto3" json:"requested_token_type,omitempty"` + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + SubjectTokenPath string `protobuf:"bytes,6,opt,name=subject_token_path,json=subjectTokenPath,proto3" json:"subject_token_path,omitempty"` + // Type of the subject token. 
+ SubjectTokenType string `protobuf:"bytes,7,opt,name=subject_token_type,json=subjectTokenType,proto3" json:"subject_token_type,omitempty"` + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + ActorTokenPath string `protobuf:"bytes,8,opt,name=actor_token_path,json=actorTokenPath,proto3" json:"actor_token_path,omitempty"` + // Type of the actor token. + ActorTokenType string `protobuf:"bytes,9,opt,name=actor_token_type,json=actorTokenType,proto3" json:"actor_token_type,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_StsService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_StsService.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_CallCredentials_StsService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 3} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetTokenExchangeServiceUri() string { + if x != nil { + return x.TokenExchangeServiceUri + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetScope() string { + if x != nil { + return x.Scope + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetRequestedTokenType() string { + if x != nil { + return x.RequestedTokenType + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenPath() string { + if x != nil { + return x.SubjectTokenPath + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenType() string { + if x != nil { + return x.SubjectTokenType + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenPath() string { + if x != nil { + return x.ActorTokenPath + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenType() string { + if x != nil { + return x.ActorTokenType + } + return "" +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. 
+ // + // Types that are assignable to ValueSpecifier: + // + // *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue + // *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue + ValueSpecifier isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier `protobuf_oneof:"value_specifier"` +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) Reset() { + *x = GrpcService_GoogleGrpc_ChannelArgs_Value{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs_Value.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_ChannelArgs_Value) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4, 0} +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) GetValueSpecifier() isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier { + if m != nil { + return m.ValueSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetStringValue() string { + if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetIntValue() int64 { + if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok { + return x.IntValue + } + return 0 +} + +type isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier interface { + isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() { +} + +var File_envoy_config_core_v3_grpc_service_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x23, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x67, 0x72, 0x70, + 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x09, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, + 0x63, 0x12, 0x4f, 0x0a, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x70, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 
0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xe7, 0x02, 0x0a, 0x09, + 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, + 0x00, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x59, 0x0a, 0x1a, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, + 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, 0x13, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 
0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x67, + 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0c, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, 0x73, + 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0a, + 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, 0x0a, + 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, + 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, 0x12, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a, + 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, 0x1a, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d, + 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, + 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x22, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, + 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, + 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, + 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, 0x56, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 
0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, 0x9a, + 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, + 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, 0x0b, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, + 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 
0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_grpc_service_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_grpc_service_proto_rawDescData = file_envoy_config_core_v3_grpc_service_proto_rawDesc +) + +func file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_grpc_service_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_service_proto_rawDescData) + }) + return file_envoy_config_core_v3_grpc_service_proto_rawDescData +} + +var file_envoy_config_core_v3_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{ + (*GrpcService)(nil), // 0: envoy.config.core.v3.GrpcService + (*GrpcService_EnvoyGrpc)(nil), // 1: envoy.config.core.v3.GrpcService.EnvoyGrpc + (*GrpcService_GoogleGrpc)(nil), // 2: envoy.config.core.v3.GrpcService.GoogleGrpc + (*GrpcService_GoogleGrpc_SslCredentials)(nil), // 3: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials + (*GrpcService_GoogleGrpc_GoogleLocalCredentials)(nil), // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials + (*GrpcService_GoogleGrpc_ChannelCredentials)(nil), // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials + (*GrpcService_GoogleGrpc_CallCredentials)(nil), // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials + (*GrpcService_GoogleGrpc_ChannelArgs)(nil), // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs + (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials)(nil), // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials + (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials)(nil), // 
9: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials + (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin + (*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil), // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService + (*GrpcService_GoogleGrpc_ChannelArgs_Value)(nil), // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value + nil, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry + (*durationpb.Duration)(nil), // 14: google.protobuf.Duration + (*HeaderValue)(nil), // 15: envoy.config.core.v3.HeaderValue + (*RetryPolicy)(nil), // 16: envoy.config.core.v3.RetryPolicy + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*structpb.Struct)(nil), // 18: google.protobuf.Struct + (*DataSource)(nil), // 19: envoy.config.core.v3.DataSource + (*emptypb.Empty)(nil), // 20: google.protobuf.Empty + (*anypb.Any)(nil), // 21: google.protobuf.Any +} +var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc + 2, // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc + 14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration + 15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue + 16, // 4: envoy.config.core.v3.GrpcService.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 16, // 5: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 17, // 6: envoy.config.core.v3.GrpcService.EnvoyGrpc.max_receive_message_length:type_name -> google.protobuf.UInt32Value + 5, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials + 6, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials + 18, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct + 17, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 7, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs + 19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource + 19, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource + 19, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource + 3, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials + 20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty + 4, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials + 20, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty + 
8, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials + 9, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials + 10, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin + 11, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService + 13, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry + 21, // 24: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any + 12, // 25: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_grpc_service_proto_init() } +func file_envoy_config_core_v3_grpc_service_proto_init() { + if File_envoy_config_core_v3_grpc_service_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_EnvoyGrpc); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_SslCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_GoogleLocalCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_StsService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs_Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcService_EnvoyGrpc_)(nil), + (*GrpcService_GoogleGrpc_)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials)(nil), + (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault)(nil), + (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_CallCredentials_AccessToken)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_StsService_)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].OneofWrappers = []interface{}{ + 
(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue)(nil), + (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_grpc_service_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_grpc_service_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_grpc_service_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_grpc_service_proto = out.File + file_envoy_config_core_v3_grpc_service_proto_rawDesc = nil + file_envoy_config_core_v3_grpc_service_proto_goTypes = nil + file_envoy_config_core_v3_grpc_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go new file mode 100644 index 000000000..9ef41b077 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go @@ -0,0 +1,2579 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_service.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GrpcService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GrpcServiceMultiError, or +// nil if none found. 
+func (m *GrpcService) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetInitialMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTargetSpecifierPresent := false + switch v := m.TargetSpecifier.(type) { + case *GrpcService_EnvoyGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true + + if all { + switch v := interface{}(m.GetEnvoyGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else 
if v, ok := interface{}(m.GetEnvoyGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTargetSpecifierPresent { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcServiceMultiError(errors) + } + + return nil +} + +// GrpcServiceMultiError is an error wrapping multiple validation errors +// returned by GrpcService.ValidateAll() if the designated constraints aren't met. +type GrpcServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcServiceMultiError) AllErrors() []error { return m } + +// GrpcServiceValidationError is the validation error returned by +// GrpcService.Validate if the designated constraints aren't met. +type GrpcServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcServiceValidationError) ErrorName() string { return "GrpcServiceValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcServiceValidationError{} + +// Validate checks the field values on GrpcService_EnvoyGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_EnvoyGrpc) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_EnvoyGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_EnvoyGrpcMultiError, or nil if none found. +func (m *GrpcService_EnvoyGrpc) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_EnvoyGrpc) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetAuthority()) < 0 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value length must be at least 0 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetAuthority()) > 16384 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_GrpcService_EnvoyGrpc_Authority_Pattern.MatchString(m.GetAuthority()) { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxReceiveMessageLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message 
failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxReceiveMessageLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SkipEnvoyHeaders + + if len(errors) > 0 { + return GrpcService_EnvoyGrpcMultiError(errors) + } + + return nil +} + +// GrpcService_EnvoyGrpcMultiError is an error wrapping multiple validation +// errors returned by GrpcService_EnvoyGrpc.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_EnvoyGrpcMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_EnvoyGrpcMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_EnvoyGrpcMultiError) AllErrors() []error { return m } + +// GrpcService_EnvoyGrpcValidationError is the validation error returned by +// GrpcService_EnvoyGrpc.Validate if the designated constraints aren't met. +type GrpcService_EnvoyGrpcValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_EnvoyGrpcValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_EnvoyGrpcValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_EnvoyGrpcValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_EnvoyGrpcValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_EnvoyGrpcValidationError) ErrorName() string { + return "GrpcService_EnvoyGrpcValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_EnvoyGrpcValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_EnvoyGrpc.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_EnvoyGrpcValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_EnvoyGrpcValidationError{} + +var _GrpcService_EnvoyGrpc_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on GrpcService_GoogleGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpcMultiError, or nil if none found. 
+func (m *GrpcService_GoogleGrpc) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetTargetUri()) < 1 { + err := GrpcService_GoogleGrpcValidationError{ + field: "TargetUri", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetChannelCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChannelCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCallCredentials() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetStatPrefix()) < 1 { + err := GrpcService_GoogleGrpcValidationError{ + field: "StatPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for CredentialsFactoryName + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerStreamBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = 
append(errors, GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerStreamBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetChannelArgs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChannelArgs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpcMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpcMultiError is an error wrapping multiple validation +// errors returned by GrpcService_GoogleGrpc.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpcMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpcMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpcMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpcValidationError is the validation error returned by +// GrpcService_GoogleGrpc.Validate if the designated constraints aren't met. +type GrpcService_GoogleGrpcValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpcValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpcValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpcValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpcValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpcValidationError) ErrorName() string { + return "GrpcService_GoogleGrpcValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpcValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpcValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpcValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_SslCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_SslCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc_SslCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_SslCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_SslCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRootCerts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRootCerts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCertChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCertChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_SslCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_SslCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_SslCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_SslCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_SslCredentialsValidationError is the validation error +// returned by GrpcService_GoogleGrpc_SslCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_SslCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_SslCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_SslCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_SslCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_SslCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_GoogleLocalCredentials.ValidateAll() if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError is the +// validation error returned by +// GrpcService_GoogleGrpc_GoogleLocalCredentials.Validate if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_GoogleLocalCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_ChannelCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_ChannelCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { + case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSslCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSslCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleDefault()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleDefault()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a 
typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocalCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_ChannelCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelCredentialsValidationError is the validation +// error returned by GrpcService_GoogleGrpc_ChannelCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_ChannelCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_ChannelCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_ChannelCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_CallCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { + case *GrpcService_GoogleGrpc_CallCredentials_AccessToken: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + // no validation rules for AccessToken + case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleComputeEngine()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleComputeEngine()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken: + if v == nil { + err := 
GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + // no validation rules for GoogleRefreshToken + case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetServiceAccountJwtAccess()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServiceAccountJwtAccess()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleIam()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleIam()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetFromPlugin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFromPlugin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_StsService_: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStsService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStsService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_CallCredentialsValidationError is the validation +// error returned by GrpcService_GoogleGrpc_CallCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentialsValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *GrpcService_GoogleGrpc_ChannelArgs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc_ChannelArgs +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelArgsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_ChannelArgs) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + { + sorted_keys := make([]string, len(m.GetArgs())) + i := 0 + for key := range m.GetArgs() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetArgs()[key] + _ = val + + // no validation rules for Args[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelArgsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelArgsMultiError is an error wrapping multiple +// validation errors returned by +// GrpcService_GoogleGrpc_ChannelArgs.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelArgsValidationError is the validation error +// returned by GrpcService_GoogleGrpc_ChannelArgs.Validate if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_ChannelArgsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_ChannelArgs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_ChannelArgsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_ChannelArgsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError, +// or nil if none found. 
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for JsonKey + + // no validation rules for TokenLifetimeSeconds + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError +// is an error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.Validate +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError, or +// nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AuthorizationToken + + // no validation rules for AuthoritySelector + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError is an +// error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError, +// or nil if none found. 
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError +// is an error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.Validate +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. 
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TokenExchangeServiceUri + + // no validation rules for Resource + + // no validation rules for Audience + + // no validation rules for Scope + + // no validation rules for RequestedTokenType + + if utf8.RuneCountInString(m.GetSubjectTokenPath()) < 1 { + err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ + field: "SubjectTokenPath", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetSubjectTokenType()) < 1 { + err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ + field: "SubjectTokenType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ActorTokenPath + + // no validation rules for ActorTokenType + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError is an error +// wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_StsService.ValidateAll() if the +// designated constraints aren't met. 
+type GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError is the +// validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_StsService.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_StsService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs_Value +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_ChannelArgs_Value with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError, or nil if none found. 
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofValueSpecifierPresent := false + switch v := m.ValueSpecifier.(type) { + case *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true + // no validation rules for StringValue + case *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true + // no validation rules for IntValue + default: + _ = v // ensures v is used + } + if !oneofValueSpecifierPresent { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_ChannelArgs_Value.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError is the validation +// error returned by GrpcService_GoogleGrpc_ChannelArgs_Value.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_ChannelArgs_Value.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go new file mode 100644 index 000000000..90d07efa4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go @@ -0,0 +1,1648 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_service.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + emptypb "github.com/planetscale/vtprotobuf/types/known/emptypb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcService_EnvoyGrpc) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_EnvoyGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipEnvoyHeaders { + i-- + if m.SkipEnvoyHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.MaxReceiveMessageLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CertChain != nil { + size, err := m.CertChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + size, err := m.PrivateKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RootCerts != nil { + size, err := m.RootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SslCredentials != nil { + size, err := m.SslCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleDefault != nil { + size, err := (*emptypb.Empty)(m.GoogleDefault).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
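[Editor's aside, not part of the vendored file] The generated MarshalToSizedBufferVTStrict methods above write each field back-to-front into a pre-sized buffer and emit hard-coded tag bytes such as 0xa, 0x12 and 0x1a. Those constants come from the protobuf wire format: a field key is (field_number << 3) | wire_type, and wire type 2 marks a length-delimited field. The sketch below is a minimal, hedged illustration of that arithmetic (written forward for readability; the names are invented for the example and are not part of the vtprotobuf API):

package main

import "fmt"

// tagByte computes the single-byte key used for small field numbers.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

// appendLengthDelimited writes a length-delimited field "forward" for clarity;
// the generated code writes the same bytes back-to-front into a sized buffer.
// The length is assumed to fit in one varint byte (< 128) for this sketch.
func appendLengthDelimited(dst []byte, fieldNumber int, payload []byte) []byte {
	dst = append(dst, tagByte(fieldNumber, 2)) // key byte
	dst = append(dst, byte(len(payload)))      // varint-encoded length
	return append(dst, payload...)             // payload bytes
}

func main() {
	// Fields 1, 2 and 3 with wire type 2 yield 0xa, 0x12 and 0x1a,
	// the same constants that appear in the generated marshalers above.
	fmt.Printf("field 1: 0x%x, field 2: 0x%x, field 3: 0x%x\n",
		tagByte(1, 2), tagByte(2, 2), tagByte(3, 2))
	fmt.Printf("% x\n", appendLengthDelimited(nil, 3, []byte("local")))
}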
+func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalCredentials != nil { + size, err := m.LocalCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TokenLifetimeSeconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TokenLifetimeSeconds)) + i-- + dAtA[i] = 0x10 + } + if len(m.JsonKey) > 0 { + i -= len(m.JsonKey) + copy(dAtA[i:], m.JsonKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.JsonKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AuthoritySelector) > 0 { + i -= len(m.AuthoritySelector) + copy(dAtA[i:], m.AuthoritySelector) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthoritySelector))) + i-- + dAtA[i] = 0x12 + } + if len(m.AuthorizationToken) > 0 { + i -= len(m.AuthorizationToken) + copy(dAtA[i:], m.AuthorizationToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthorizationToken))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ActorTokenType) > 0 { + i -= len(m.ActorTokenType) + copy(dAtA[i:], m.ActorTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenType))) + i-- + dAtA[i] = 0x4a + } + if len(m.ActorTokenPath) > 0 { + i -= len(m.ActorTokenPath) + copy(dAtA[i:], m.ActorTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenPath))) + i-- + dAtA[i] = 0x42 + } + if len(m.SubjectTokenType) > 0 { + i -= len(m.SubjectTokenType) + copy(dAtA[i:], m.SubjectTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenType))) + i-- + dAtA[i] = 0x3a + } + if len(m.SubjectTokenPath) > 0 { + i -= len(m.SubjectTokenPath) + copy(dAtA[i:], m.SubjectTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenPath))) + i-- + dAtA[i] = 0x32 + } + if len(m.RequestedTokenType) > 0 { + i -= len(m.RequestedTokenType) + copy(dAtA[i:], m.RequestedTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestedTokenType))) + i-- + dAtA[i] = 0x2a + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Audience) > 0 { + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Audience))) + i-- + dAtA[i] = 0x1a + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + } + if len(m.TokenExchangeServiceUri) > 0 { + i -= len(m.TokenExchangeServiceUri) + copy(dAtA[i:], m.TokenExchangeServiceUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TokenExchangeServiceUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.AccessToken) + copy(dAtA[i:], m.AccessToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessToken))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleComputeEngine != nil { + size, err := (*emptypb.Empty)(m.GoogleComputeEngine).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.GoogleRefreshToken) + copy(dAtA[i:], m.GoogleRefreshToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GoogleRefreshToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ServiceAccountJwtAccess != nil { + size, err := m.ServiceAccountJwtAccess.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleIam != nil { + size, err := m.GoogleIam.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FromPlugin != nil { + size, err := m.FromPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StsService != nil { + size, err := m.StsService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else 
{ + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Args) > 0 { + for k := range m.Args { + v := m.Args[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc) 
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ChannelArgs != nil { + size, err := m.ChannelArgs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.PerStreamBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.CredentialsFactoryName) > 0 { + i -= len(m.CredentialsFactoryName) + copy(dAtA[i:], m.CredentialsFactoryName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CredentialsFactoryName))) + i-- + dAtA[i] = 0x2a + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x22 + } + if len(m.CallCredentials) > 0 { + for iNdEx := len(m.CallCredentials) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CallCredentials[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ChannelCredentials != nil { + size, err := m.ChannelCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.TargetUri) > 0 { + i -= len(m.TargetUri) + copy(dAtA[i:], m.TargetUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialMetadata) > 0 { + 
for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.TargetSpecifier.(*GrpcService_GoogleGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TargetSpecifier.(*GrpcService_EnvoyGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyGrpc != nil { + size, err := m.EnvoyGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleGrpc != nil { + size, err := m.GoogleGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_EnvoyGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxReceiveMessageLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipEnvoyHeaders { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RootCerts != nil { + l = m.RootCerts.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + l = m.PrivateKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CertChain != nil { + l = m.CertChain.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SslCredentials != nil { + l = m.SslCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleDefault != nil { + l = (*emptypb.Empty)(m.GoogleDefault).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalCredentials != nil { + l = m.LocalCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JsonKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TokenLifetimeSeconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TokenLifetimeSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AuthorizationToken) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AuthoritySelector) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TokenExchangeServiceUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Audience) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Scope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestedTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + 
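[Editor's aside, not part of the vendored file] The SizeVT methods above repeatedly use the expression "1 + l + protohelpers.SizeOfVarint(uint64(l))": one tag byte (valid for field numbers below 16), the payload length l, plus the varint-encoded length prefix. A minimal sketch of that sizing arithmetic follows; the helper names here are invented for illustration, though the varint-size loop mirrors what protohelpers.SizeOfVarint computes:

package main

import "fmt"

// sizeOfVarint returns how many bytes a uint64 occupies in varint encoding.
func sizeOfVarint(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

// lengthDelimitedFieldSize sizes one string/bytes/message field that uses a
// single-byte tag, matching the "1 + l + SizeOfVarint(l)" pattern above.
func lengthDelimitedFieldSize(payloadLen int) int {
	return 1 + payloadLen + sizeOfVarint(uint64(payloadLen))
}

func main() {
	// A 10-byte cluster name costs 1 (tag) + 1 (length) + 10 (data) = 12 bytes.
	fmt.Println(lengthDelimitedFieldSize(10))
}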
+func (m *GrpcService_GoogleGrpc_CallCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleComputeEngine != nil { + l = (*emptypb.Empty)(m.GoogleComputeEngine).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GoogleRefreshToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ServiceAccountJwtAccess != nil { + l = m.ServiceAccountJwtAccess.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleIam != nil { + l = m.GoogleIam.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FromPlugin != nil { + l = m.FromPlugin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StsService != nil { + l = m.StsService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for k, v := range m.Args { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.ChannelCredentials != nil { + l = m.ChannelCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CallCredentials) > 0 { + for _, e := range m.CallCredentials { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CredentialsFactoryName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerStreamBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ChannelArgs != nil { + l = m.ChannelArgs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.TargetSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_EnvoyGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyGrpc != nil { + l = m.EnvoyGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleGrpc != nil { + l = m.GoogleGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go new file mode 100644 index 000000000..96ac5fc63 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go @@ -0,0 +1,1705 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Endpoint health status. +type HealthStatus int32 + +const ( + // The health status is not known. This is interpreted by Envoy as “HEALTHY“. + HealthStatus_UNKNOWN HealthStatus = 0 + // Healthy. + HealthStatus_HEALTHY HealthStatus = 1 + // Unhealthy. + HealthStatus_UNHEALTHY HealthStatus = 2 + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as “UNHEALTHY“. + HealthStatus_DRAINING HealthStatus = 3 + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // “UNHEALTHY“. + HealthStatus_TIMEOUT HealthStatus = 4 + // Degraded. + HealthStatus_DEGRADED HealthStatus = 5 +) + +// Enum value maps for HealthStatus. +var ( + HealthStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "DRAINING", + 4: "TIMEOUT", + 5: "DEGRADED", + } + HealthStatus_value = map[string]int32{ + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "DRAINING": 3, + "TIMEOUT": 4, + "DEGRADED": 5, + } +) + +func (x HealthStatus) Enum() *HealthStatus { + p := new(HealthStatus) + *p = x + return p +} + +func (x HealthStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_health_check_proto_enumTypes[0].Descriptor() +} + +func (HealthStatus) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_health_check_proto_enumTypes[0] +} + +func (x HealthStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthStatus.Descriptor instead. +func (HealthStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0} +} + +type HealthStatusSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An order-independent set of health status. + Statuses []HealthStatus `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.core.v3.HealthStatus" json:"statuses,omitempty"` +} + +func (x *HealthStatusSet) Reset() { + *x = HealthStatusSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthStatusSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthStatusSet) ProtoMessage() {} + +func (x *HealthStatusSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthStatusSet.ProtoReflect.Descriptor instead. +func (*HealthStatusSet) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthStatusSet) GetStatuses() []HealthStatus { + if x != nil { + return x.Statuses + } + return nil +} + +// [#next-free-field: 27] +type HealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. 
+ Timeout *durationpb.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The interval between health checks. + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + InitialJitter *durationpb.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"` + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + IntervalJitter *durationpb.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add “interval_ms“ * + // “interval_jitter_percent“ / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"` + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for “http“ health checking if a host responds with a code not in + // :ref:`expected_statuses ` + // or :ref:`retriable_statuses `, + // this threshold is ignored and the host is considered immediately unhealthy. + UnhealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"` + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + HealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"` + // [#not-implemented-hide:] Non-serving port for health checking. + AltPort *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"` + // Reuse health check connection between health checks. Default is true. + ReuseConnection *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"` + // Types that are assignable to HealthChecker: + // + // *HealthCheck_HttpHealthCheck_ + // *HealthCheck_TcpHealthCheck_ + // *HealthCheck_GrpcHealthCheck_ + // *HealthCheck_CustomHealthCheck_ + HealthChecker isHealthCheck_HealthChecker `protobuf_oneof:"health_checker"` + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. 
+ NoTrafficInterval *durationpb.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"` + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // “no_traffic_interval“ but then revert to lower frequency “no_traffic_healthy_interval“ once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + NoTrafficHealthyInterval *durationpb.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"` + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". + UnhealthyInterval *durationpb.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"` + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + UnhealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"` + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + HealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` + // .. attention:: + // This field is deprecated in favor of the extension + // :ref:`event_logger ` and + // :ref:`event_log_path ` + // in the file sink extension. + // + // Specifies the path to the :ref:`health check event log `. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/health_check.proto. + EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` + // A list of event log sinks to process the health check event. + // [#extension-category: envoy.health_check.event_sinks] + EventLogger []*TypedExtensionConfig `protobuf:"bytes,25,rep,name=event_logger,json=eventLogger,proto3" json:"event_logger,omitempty"` + // [#not-implemented-hide:] + // The gRPC service for the health check event service. 
+ // If empty, health check events won't be sent to a remote endpoint. + EventService *EventServiceConfig `protobuf:"bytes,22,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"` + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"` + // If set to true, health check success events will always be logged. If set to false, only host addition event will be logged + // if it is the first successful health check, or if the healthy threshold is reached. + // The default value is false. + AlwaysLogHealthCheckSuccess bool `protobuf:"varint,26,opt,name=always_log_health_check_success,json=alwaysLogHealthCheckSuccess,proto3" json:"always_log_health_check_success,omitempty"` + // This allows overriding the cluster TLS settings, just for health check connections. + TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"` + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of “envoy.transport_socket“ in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + TransportSocketMatchCriteria *structpb.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"` +} + +func (x *HealthCheck) Reset() { + *x = HealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck) ProtoMessage() {} + +func (x *HealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead. 
+func (*HealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheck) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *HealthCheck) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *HealthCheck) GetInitialJitter() *durationpb.Duration { + if x != nil { + return x.InitialJitter + } + return nil +} + +func (x *HealthCheck) GetIntervalJitter() *durationpb.Duration { + if x != nil { + return x.IntervalJitter + } + return nil +} + +func (x *HealthCheck) GetIntervalJitterPercent() uint32 { + if x != nil { + return x.IntervalJitterPercent + } + return 0 +} + +func (x *HealthCheck) GetUnhealthyThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.UnhealthyThreshold + } + return nil +} + +func (x *HealthCheck) GetHealthyThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.HealthyThreshold + } + return nil +} + +func (x *HealthCheck) GetAltPort() *wrapperspb.UInt32Value { + if x != nil { + return x.AltPort + } + return nil +} + +func (x *HealthCheck) GetReuseConnection() *wrapperspb.BoolValue { + if x != nil { + return x.ReuseConnection + } + return nil +} + +func (m *HealthCheck) GetHealthChecker() isHealthCheck_HealthChecker { + if m != nil { + return m.HealthChecker + } + return nil +} + +func (x *HealthCheck) GetHttpHealthCheck() *HealthCheck_HttpHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_HttpHealthCheck_); ok { + return x.HttpHealthCheck + } + return nil +} + +func (x *HealthCheck) GetTcpHealthCheck() *HealthCheck_TcpHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_TcpHealthCheck_); ok { + return x.TcpHealthCheck + } + return nil +} + +func (x *HealthCheck) GetGrpcHealthCheck() *HealthCheck_GrpcHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_GrpcHealthCheck_); ok { + return x.GrpcHealthCheck + } + return nil +} + +func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_CustomHealthCheck_); ok { + return x.CustomHealthCheck + } + return nil +} + +func (x *HealthCheck) GetNoTrafficInterval() *durationpb.Duration { + if x != nil { + return x.NoTrafficInterval + } + return nil +} + +func (x *HealthCheck) GetNoTrafficHealthyInterval() *durationpb.Duration { + if x != nil { + return x.NoTrafficHealthyInterval + } + return nil +} + +func (x *HealthCheck) GetUnhealthyInterval() *durationpb.Duration { + if x != nil { + return x.UnhealthyInterval + } + return nil +} + +func (x *HealthCheck) GetUnhealthyEdgeInterval() *durationpb.Duration { + if x != nil { + return x.UnhealthyEdgeInterval + } + return nil +} + +func (x *HealthCheck) GetHealthyEdgeInterval() *durationpb.Duration { + if x != nil { + return x.HealthyEdgeInterval + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/health_check.proto. 
+func (x *HealthCheck) GetEventLogPath() string { + if x != nil { + return x.EventLogPath + } + return "" +} + +func (x *HealthCheck) GetEventLogger() []*TypedExtensionConfig { + if x != nil { + return x.EventLogger + } + return nil +} + +func (x *HealthCheck) GetEventService() *EventServiceConfig { + if x != nil { + return x.EventService + } + return nil +} + +func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool { + if x != nil { + return x.AlwaysLogHealthCheckFailures + } + return false +} + +func (x *HealthCheck) GetAlwaysLogHealthCheckSuccess() bool { + if x != nil { + return x.AlwaysLogHealthCheckSuccess + } + return false +} + +func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions { + if x != nil { + return x.TlsOptions + } + return nil +} + +func (x *HealthCheck) GetTransportSocketMatchCriteria() *structpb.Struct { + if x != nil { + return x.TransportSocketMatchCriteria + } + return nil +} + +type isHealthCheck_HealthChecker interface { + isHealthCheck_HealthChecker() +} + +type HealthCheck_HttpHealthCheck_ struct { + // HTTP health check. + HttpHealthCheck *HealthCheck_HttpHealthCheck `protobuf:"bytes,8,opt,name=http_health_check,json=httpHealthCheck,proto3,oneof"` +} + +type HealthCheck_TcpHealthCheck_ struct { + // TCP health check. + TcpHealthCheck *HealthCheck_TcpHealthCheck `protobuf:"bytes,9,opt,name=tcp_health_check,json=tcpHealthCheck,proto3,oneof"` +} + +type HealthCheck_GrpcHealthCheck_ struct { + // gRPC health check. + GrpcHealthCheck *HealthCheck_GrpcHealthCheck `protobuf:"bytes,11,opt,name=grpc_health_check,json=grpcHealthCheck,proto3,oneof"` +} + +type HealthCheck_CustomHealthCheck_ struct { + // Custom health check. + CustomHealthCheck *HealthCheck_CustomHealthCheck `protobuf:"bytes,13,opt,name=custom_health_check,json=customHealthCheck,proto3,oneof"` +} + +func (*HealthCheck_HttpHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_TcpHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_GrpcHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_CustomHealthCheck_) isHealthCheck_HealthChecker() {} + +// Describes the encoding of the payload bytes in the payload. +type HealthCheck_Payload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *HealthCheck_Payload_Text + // *HealthCheck_Payload_Binary + Payload isHealthCheck_Payload_Payload `protobuf_oneof:"payload"` +} + +func (x *HealthCheck_Payload) Reset() { + *x = HealthCheck_Payload{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_Payload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_Payload) ProtoMessage() {} + +func (x *HealthCheck_Payload) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_Payload.ProtoReflect.Descriptor instead. 
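To make the oneof plumbing above concrete, here is a minimal sketch (again not part of the generated file) of building a HealthCheck with the http_health_check branch; the corev3 import alias is an assumption:

package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

// minimalHTTPHealthCheck fills the required timing/threshold fields and selects the
// HTTP health checker through the HealthCheck_HttpHealthCheck_ oneof wrapper.
func minimalHTTPHealthCheck() *corev3.HealthCheck {
	return &corev3.HealthCheck{
		Timeout:            durationpb.New(2 * time.Second),
		Interval:           durationpb.New(10 * time.Second),
		UnhealthyThreshold: wrapperspb.UInt32(3),
		HealthyThreshold:   wrapperspb.UInt32(2),
		NoTrafficInterval:  durationpb.New(60 * time.Second),
		HealthChecker: &corev3.HealthCheck_HttpHealthCheck_{
			HttpHealthCheck: &corev3.HealthCheck_HttpHealthCheck{
				Host: "example.com",
				Path: "/healthcheck",
			},
		},
	}
}

On the resulting message, GetHttpHealthCheck() returns the nested message, while the other Get*HealthCheck accessors return nil because only one oneof branch is set.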
+func (*HealthCheck_Payload) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 0} +} + +func (m *HealthCheck_Payload) GetPayload() isHealthCheck_Payload_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *HealthCheck_Payload) GetText() string { + if x, ok := x.GetPayload().(*HealthCheck_Payload_Text); ok { + return x.Text + } + return "" +} + +func (x *HealthCheck_Payload) GetBinary() []byte { + if x, ok := x.GetPayload().(*HealthCheck_Payload_Binary); ok { + return x.Binary + } + return nil +} + +type isHealthCheck_Payload_Payload interface { + isHealthCheck_Payload_Payload() +} + +type HealthCheck_Payload_Text struct { + // Hex encoded payload. E.g., "000000FF". + Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"` +} + +type HealthCheck_Payload_Binary struct { + // Binary payload. + Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"` +} + +func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {} + +func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {} + +// [#next-free-field: 15] +type HealthCheck_HttpHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // Specifies the HTTP path that will be requested during health checking. For example + // “/healthcheck“. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // [#not-implemented-hide:] HTTP specific payload. + Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"` + // Specifies a list of HTTP expected responses to match in the first “response_buffer_size“ bytes of the response body. + // If it is set, both the expected response check and status code determine the health check. + // When checking the response, “fuzzy” matching is performed such that each payload block must be found, + // and in the order specified, but not necessarily contiguous. + // + // .. note:: + // + // It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + // The default buffer size is 1024 bytes when it is not set. + Receive []*HealthCheck_Payload `protobuf:"bytes,4,rep,name=receive,proto3" json:"receive,omitempty"` + // Specifies the size of response buffer in bytes that is used to Payload match. + // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. + ResponseBufferSize *wrapperspb.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"` + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. 
+ RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. + ExpectedStatuses []*v3.Int64Range `protobuf:"bytes,9,rep,name=expected_statuses,json=expectedStatuses,proto3" json:"expected_statuses,omitempty"` + // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + // will count towards the configured :ref:`unhealthy_threshold `, + // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + // :ref:`Int64Range `. The start and end of each range are required. + // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + // be considered a successful health check. By default all responses not in + // :ref:`expected_statuses ` will result in + // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + // non-200 response will result in the host being marked unhealthy. + RetriableStatuses []*v3.Int64Range `protobuf:"bytes,12,rep,name=retriable_statuses,json=retriableStatuses,proto3" json:"retriable_statuses,omitempty"` + // Use specified application protocol for health checks. + CodecClientType v3.CodecClientType `protobuf:"varint,10,opt,name=codec_client_type,json=codecClientType,proto3,enum=envoy.type.v3.CodecClientType" json:"codec_client_type,omitempty"` + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. + ServiceNameMatcher *v31.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"` + // HTTP Method that will be used for health checking, default is "GET". + // GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported. + // CONNECT method is disallowed because it is not appropriate for health check request. + // If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. 
+ Method RequestMethod `protobuf:"varint,13,opt,name=method,proto3,enum=envoy.config.core.v3.RequestMethod" json:"method,omitempty"` +} + +func (x *HealthCheck_HttpHealthCheck) Reset() { + *x = HealthCheck_HttpHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_HttpHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_HttpHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_HttpHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_HttpHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_HttpHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *HealthCheck_HttpHealthCheck) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *HealthCheck_HttpHealthCheck) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload { + if x != nil { + return x.Send + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetReceive() []*HealthCheck_Payload { + if x != nil { + return x.Receive + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrapperspb.UInt64Value { + if x != nil { + return x.ResponseBufferSize + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetExpectedStatuses() []*v3.Int64Range { + if x != nil { + return x.ExpectedStatuses + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRetriableStatuses() []*v3.Int64Range { + if x != nil { + return x.RetriableStatuses + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetCodecClientType() v3.CodecClientType { + if x != nil { + return x.CodecClientType + } + return v3.CodecClientType(0) +} + +func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *v31.StringMatcher { + if x != nil { + return x.ServiceNameMatcher + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetMethod() RequestMethod { + if x != nil { + return x.Method + } + return RequestMethod_METHOD_UNSPECIFIED +} + +type HealthCheck_TcpHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Empty payloads imply a connect-only health check. + Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"` + // When checking the response, “fuzzy” matching is performed such that each + // payload block must be found, and in the order specified, but not + // necessarily contiguous. + Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"` + // When setting this value, it tries to attempt health check request with ProxyProtocol. 
+ // When “send“ is presented, they are sent after preceding ProxyProtocol header. + // Only ProxyProtocol header is sent when “send“ is not presented. + // It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes + // LOCAL command and doesn't include L3/L4. + ProxyProtocolConfig *ProxyProtocolConfig `protobuf:"bytes,3,opt,name=proxy_protocol_config,json=proxyProtocolConfig,proto3" json:"proxy_protocol_config,omitempty"` +} + +func (x *HealthCheck_TcpHealthCheck) Reset() { + *x = HealthCheck_TcpHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_TcpHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_TcpHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_TcpHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_TcpHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_TcpHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *HealthCheck_TcpHealthCheck) GetSend() *HealthCheck_Payload { + if x != nil { + return x.Send + } + return nil +} + +func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload { + if x != nil { + return x.Receive + } + return nil +} + +func (x *HealthCheck_TcpHealthCheck) GetProxyProtocolConfig() *ProxyProtocolConfig { + if x != nil { + return x.ProxyProtocolConfig + } + return nil +} + +type HealthCheck_RedisHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set, optionally perform “EXISTS “ instead of “PING“. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *HealthCheck_RedisHealthCheck) Reset() { + *x = HealthCheck_RedisHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_RedisHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_RedisHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_RedisHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_RedisHealthCheck.ProtoReflect.Descriptor instead. 
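As a sketch of how the Payload oneof composes with the TCP checker (illustrative only; the corev3 alias is assumed):

package example

import (
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

// tcpHealthCheckWithPayloads sends a hex-encoded probe and fuzzy-matches the expected
// response blocks, as described in the Payload and TcpHealthCheck comments above.
// Leaving Send and Receive empty instead gives a connect-only check.
func tcpHealthCheckWithPayloads() *corev3.HealthCheck_TcpHealthCheck {
	return &corev3.HealthCheck_TcpHealthCheck{
		Send: &corev3.HealthCheck_Payload{
			Payload: &corev3.HealthCheck_Payload_Text{Text: "000000FF"},
		},
		Receive: []*corev3.HealthCheck_Payload{
			{Payload: &corev3.HealthCheck_Payload_Text{Text: "000000FF"}},
		},
	}
}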
+func (*HealthCheck_RedisHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *HealthCheck_RedisHealthCheck) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// `grpc.health.v1.Health +// `_-based +// healthcheck. See `gRPC doc `_ +// for details. +type HealthCheck_GrpcHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // Specifies a list of key-value pairs that should be added to the metadata of each GRPC call + // that is sent to the health checked cluster. For more information, including details on header value syntax, + // see the documentation on :ref:`custom request headers + // `. + InitialMetadata []*HeaderValueOption `protobuf:"bytes,3,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` +} + +func (x *HealthCheck_GrpcHealthCheck) Reset() { + *x = HealthCheck_GrpcHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_GrpcHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_GrpcHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_GrpcHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_GrpcHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_GrpcHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *HealthCheck_GrpcHealthCheck) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *HealthCheck_GrpcHealthCheck) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *HealthCheck_GrpcHealthCheck) GetInitialMetadata() []*HeaderValueOption { + if x != nil { + return x.InitialMetadata + } + return nil +} + +// Custom health check. +type HealthCheck_CustomHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The registered name of the custom health checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. 
+ // [#extension-category: envoy.health_checkers] + // + // Types that are assignable to ConfigType: + // + // *HealthCheck_CustomHealthCheck_TypedConfig + ConfigType isHealthCheck_CustomHealthCheck_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *HealthCheck_CustomHealthCheck) Reset() { + *x = HealthCheck_CustomHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_CustomHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_CustomHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_CustomHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_CustomHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_CustomHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *HealthCheck_CustomHealthCheck) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHealthCheck_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isHealthCheck_CustomHealthCheck_ConfigType interface { + isHealthCheck_CustomHealthCheck_ConfigType() +} + +type HealthCheck_CustomHealthCheck_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {} + +// Health checks occur over the transport socket specified for the cluster. This implies that if a +// cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. +// +// This allows overriding the cluster TLS settings, just for health check connections. +type HealthCheck_TlsOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. 
+ AlpnProtocols []string `protobuf:"bytes,1,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"` +} + +func (x *HealthCheck_TlsOptions) Reset() { + *x = HealthCheck_TlsOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_TlsOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_TlsOptions) ProtoMessage() {} + +func (x *HealthCheck_TlsOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_TlsOptions.ProtoReflect.Descriptor instead. +func (*HealthCheck_TlsOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 6} +} + +func (x *HealthCheck_TlsOptions) GetAlpnProtocols() []string { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +var File_envoy_config_core_v3_health_check_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, + 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0x8c, 0x20, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x69, 0x6e, 
0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, + 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x75, 0x6e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x75, + 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 
0x63, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, + 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, + 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x18, 0x6e, 0x6f, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x5b, 0x0a, 0x17, 0x75, 0x6e, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, + 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x13, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, + 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1b, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, + 0x0a, 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, + 
0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xc6, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, + 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x12, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, + 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, + 0x0d, 0x92, 0x01, 0x0a, 
0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x48, + 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, + 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, + 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, + 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xa8, 0x02, 0x0a, 0x0e, 0x54, + 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, + 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 
0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, + 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 
0x0a, 0x11, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, + 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, + 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, + 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, + 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, + 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 
0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_health_check_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_health_check_proto_rawDescData = file_envoy_config_core_v3_health_check_proto_rawDesc +) + +func file_envoy_config_core_v3_health_check_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_health_check_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_health_check_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_health_check_proto_rawDescData) + }) + return file_envoy_config_core_v3_health_check_proto_rawDescData +} + +var file_envoy_config_core_v3_health_check_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_health_check_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{ + (HealthStatus)(0), // 0: envoy.config.core.v3.HealthStatus + (*HealthStatusSet)(nil), // 1: envoy.config.core.v3.HealthStatusSet + (*HealthCheck)(nil), // 2: envoy.config.core.v3.HealthCheck + (*HealthCheck_Payload)(nil), // 3: envoy.config.core.v3.HealthCheck.Payload + (*HealthCheck_HttpHealthCheck)(nil), // 4: envoy.config.core.v3.HealthCheck.HttpHealthCheck + (*HealthCheck_TcpHealthCheck)(nil), // 5: envoy.config.core.v3.HealthCheck.TcpHealthCheck + (*HealthCheck_RedisHealthCheck)(nil), // 6: envoy.config.core.v3.HealthCheck.RedisHealthCheck + (*HealthCheck_GrpcHealthCheck)(nil), // 7: envoy.config.core.v3.HealthCheck.GrpcHealthCheck + (*HealthCheck_CustomHealthCheck)(nil), // 8: envoy.config.core.v3.HealthCheck.CustomHealthCheck + (*HealthCheck_TlsOptions)(nil), // 9: envoy.config.core.v3.HealthCheck.TlsOptions + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 11: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig + (*EventServiceConfig)(nil), // 14: envoy.config.core.v3.EventServiceConfig + (*structpb.Struct)(nil), // 15: google.protobuf.Struct + (*wrapperspb.UInt64Value)(nil), // 16: google.protobuf.UInt64Value + (*HeaderValueOption)(nil), // 17: envoy.config.core.v3.HeaderValueOption + (*v3.Int64Range)(nil), // 18: envoy.type.v3.Int64Range + (v3.CodecClientType)(0), // 19: envoy.type.v3.CodecClientType + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (RequestMethod)(0), // 21: envoy.config.core.v3.RequestMethod + (*ProxyProtocolConfig)(nil), // 22: envoy.config.core.v3.ProxyProtocolConfig + (*anypb.Any)(nil), // 23: google.protobuf.Any +} +var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus + 10, // 1: envoy.config.core.v3.HealthCheck.timeout:type_name -> google.protobuf.Duration + 10, // 2: envoy.config.core.v3.HealthCheck.interval:type_name -> google.protobuf.Duration + 10, // 3: envoy.config.core.v3.HealthCheck.initial_jitter:type_name -> google.protobuf.Duration + 10, // 4: envoy.config.core.v3.HealthCheck.interval_jitter:type_name -> google.protobuf.Duration + 11, // 5: envoy.config.core.v3.HealthCheck.unhealthy_threshold:type_name -> google.protobuf.UInt32Value + 11, // 6: envoy.config.core.v3.HealthCheck.healthy_threshold:type_name -> google.protobuf.UInt32Value + 11, // 7: 
envoy.config.core.v3.HealthCheck.alt_port:type_name -> google.protobuf.UInt32Value + 12, // 8: envoy.config.core.v3.HealthCheck.reuse_connection:type_name -> google.protobuf.BoolValue + 4, // 9: envoy.config.core.v3.HealthCheck.http_health_check:type_name -> envoy.config.core.v3.HealthCheck.HttpHealthCheck + 5, // 10: envoy.config.core.v3.HealthCheck.tcp_health_check:type_name -> envoy.config.core.v3.HealthCheck.TcpHealthCheck + 7, // 11: envoy.config.core.v3.HealthCheck.grpc_health_check:type_name -> envoy.config.core.v3.HealthCheck.GrpcHealthCheck + 8, // 12: envoy.config.core.v3.HealthCheck.custom_health_check:type_name -> envoy.config.core.v3.HealthCheck.CustomHealthCheck + 10, // 13: envoy.config.core.v3.HealthCheck.no_traffic_interval:type_name -> google.protobuf.Duration + 10, // 14: envoy.config.core.v3.HealthCheck.no_traffic_healthy_interval:type_name -> google.protobuf.Duration + 10, // 15: envoy.config.core.v3.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration + 10, // 16: envoy.config.core.v3.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration + 10, // 17: envoy.config.core.v3.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration + 13, // 18: envoy.config.core.v3.HealthCheck.event_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 14, // 19: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 9, // 20: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions + 15, // 21: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct + 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value + 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 18, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range + 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range + 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType + 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 30: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod + 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 32: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 22, // 33: envoy.config.core.v3.HealthCheck.TcpHealthCheck.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 17, // 34: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption + 23, // 35: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for 
extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_health_check_proto_init() } +func file_envoy_config_core_v3_health_check_proto_init() { + if File_envoy_config_core_v3_health_check_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_event_service_config_proto_init() + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_proxy_protocol_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthStatusSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_Payload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_HttpHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_TcpHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_RedisHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_GrpcHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_CustomHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_TlsOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HealthCheck_HttpHealthCheck_)(nil), + (*HealthCheck_TcpHealthCheck_)(nil), + (*HealthCheck_GrpcHealthCheck_)(nil), + (*HealthCheck_CustomHealthCheck_)(nil), + } + file_envoy_config_core_v3_health_check_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*HealthCheck_Payload_Text)(nil), + 
(*HealthCheck_Payload_Binary)(nil), + } + file_envoy_config_core_v3_health_check_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*HealthCheck_CustomHealthCheck_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_health_check_proto_rawDesc, + NumEnums: 1, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_health_check_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_health_check_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_health_check_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_health_check_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_health_check_proto = out.File + file_envoy_config_core_v3_health_check_proto_rawDesc = nil + file_envoy_config_core_v3_health_check_proto_goTypes = nil + file_envoy_config_core_v3_health_check_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go new file mode 100644 index 000000000..707776073 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go @@ -0,0 +1,2291 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.CodecClientType(0) +) + +// Validate checks the field values on HealthStatusSet with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HealthStatusSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthStatusSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthStatusSetMultiError, or nil if none found. +func (m *HealthStatusSet) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthStatusSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatuses() { + _, _ = idx, item + + if _, ok := HealthStatus_name[int32(item)]; !ok { + err := HealthStatusSetValidationError{ + field: fmt.Sprintf("Statuses[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return HealthStatusSetMultiError(errors) + } + + return nil +} + +// HealthStatusSetMultiError is an error wrapping multiple validation errors +// returned by HealthStatusSet.ValidateAll() if the designated constraints +// aren't met. 
+type HealthStatusSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthStatusSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthStatusSetMultiError) AllErrors() []error { return m } + +// HealthStatusSetValidationError is the validation error returned by +// HealthStatusSet.Validate if the designated constraints aren't met. +type HealthStatusSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthStatusSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthStatusSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthStatusSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthStatusSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthStatusSetValidationError) ErrorName() string { return "HealthStatusSetValidationError" } + +// Error satisfies the builtin error interface +func (e HealthStatusSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthStatusSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthStatusSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthStatusSetValidationError{} + +// Validate checks the field values on HealthCheck with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HealthCheckMultiError, or +// nil if none found. 
+func (m *HealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetTimeout() == nil { + err := HealthCheckValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "Timeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetInterval() == nil { + err := HealthCheckValidationError{ + field: "Interval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "Interval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetInitialJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInitialJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIntervalJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IntervalJitterPercent + + if m.GetUnhealthyThreshold() == nil { + err := HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v 
:= interface{}(m.GetUnhealthyThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUnhealthyThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetHealthyThreshold() == nil { + err := HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHealthyThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthyThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAltPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAltPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetReuseConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReuseConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetNoTrafficInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = 
HealthCheckValidationError{ + field: "NoTrafficInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "NoTrafficInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetNoTrafficHealthyInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "NoTrafficHealthyInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "NoTrafficHealthyInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetUnhealthyInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "UnhealthyInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "UnhealthyInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetUnhealthyEdgeInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "UnhealthyEdgeInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "UnhealthyEdgeInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetHealthyEdgeInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "HealthyEdgeInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "HealthyEdgeInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for EventLogPath + + for idx, item := range m.GetEventLogger() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetEventService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AlwaysLogHealthCheckFailures + + // no validation rules for AlwaysLogHealthCheckSuccess + + if all { + switch v := interface{}(m.GetTlsOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocketMatchCriteria()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocketMatchCriteria()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofHealthCheckerPresent := false + switch v := m.HealthChecker.(type) { + case *HealthCheck_HttpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetHttpHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_TcpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetTcpHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_GrpcHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetGrpcHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_CustomHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetCustomHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofHealthCheckerPresent { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheckMultiError is an error wrapping multiple validation errors +// returned by HealthCheck.ValidateAll() if the designated constraints aren't met. +type HealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheckValidationError is the validation error returned by +// HealthCheck.Validate if the designated constraints aren't met. +type HealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheckValidationError) ErrorName() string { return "HealthCheckValidationError" } + +// Error satisfies the builtin error interface +func (e HealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_Payload with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_Payload) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_Payload with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_PayloadMultiError, or nil if none found. 
+func (m *HealthCheck_Payload) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_Payload) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofPayloadPresent := false + switch v := m.Payload.(type) { + case *HealthCheck_Payload_Text: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true + + if utf8.RuneCountInString(m.GetText()) < 1 { + err := HealthCheck_PayloadValidationError{ + field: "Text", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HealthCheck_Payload_Binary: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true + // no validation rules for Binary + default: + _ = v // ensures v is used + } + if !oneofPayloadPresent { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheck_PayloadMultiError(errors) + } + + return nil +} + +// HealthCheck_PayloadMultiError is an error wrapping multiple validation +// errors returned by HealthCheck_Payload.ValidateAll() if the designated +// constraints aren't met. +type HealthCheck_PayloadMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_PayloadMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_PayloadMultiError) AllErrors() []error { return m } + +// HealthCheck_PayloadValidationError is the validation error returned by +// HealthCheck_Payload.Validate if the designated constraints aren't met. +type HealthCheck_PayloadValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_PayloadValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_PayloadValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_PayloadValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_PayloadValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HealthCheck_PayloadValidationError) ErrorName() string { + return "HealthCheck_PayloadValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_PayloadValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_Payload.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_PayloadValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_PayloadValidationError{} + +// Validate checks the field values on HealthCheck_HttpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_HttpHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_HttpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_HttpHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_HttpHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HealthCheck_HttpHealthCheck_Host_Pattern.MatchString(m.GetHost()) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Host", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HealthCheck_HttpHealthCheck_Path_Pattern.MatchString(m.GetPath()) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Path", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetReceive() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case 
interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetResponseBufferSize(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "ResponseBufferSize", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern.MatchString(item) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetExpectedStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetriableStatuses() { + _, _ 
= idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := v3.CodecClientType_name[int32(m.GetCodecClientType())]; !ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "CodecClientType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetServiceNameMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServiceNameMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := _HealthCheck_HttpHealthCheck_Method_NotInLookup[m.GetMethod()]; ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must not be in list [CONNECT]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := RequestMethod_name[int32(m.GetMethod())]; !ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheck_HttpHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_HttpHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_HttpHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_HttpHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_HttpHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_HttpHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_HttpHealthCheckValidationError is the validation error returned +// by HealthCheck_HttpHealthCheck.Validate if the designated constraints +// aren't met. 
+type HealthCheck_HttpHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_HttpHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_HttpHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_HttpHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_HttpHealthCheckValidationError) ErrorName() string { + return "HealthCheck_HttpHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_HttpHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_HttpHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_HttpHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_HttpHealthCheckValidationError{} + +var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") + +var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") + +var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HealthCheck_HttpHealthCheck_Method_NotInLookup = map[RequestMethod]struct{}{ + 6: {}, +} + +// Validate checks the field values on HealthCheck_TcpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_TcpHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_TcpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_TcpHealthCheckMultiError, or nil if none found. 
+func (m *HealthCheck_TcpHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_TcpHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetReceive() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetProxyProtocolConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyProtocolConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HealthCheck_TcpHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_TcpHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_TcpHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_TcpHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_TcpHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HealthCheck_TcpHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_TcpHealthCheckValidationError is the validation error returned +// by HealthCheck_TcpHealthCheck.Validate if the designated constraints aren't met. +type HealthCheck_TcpHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_TcpHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_TcpHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_TcpHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_TcpHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_TcpHealthCheckValidationError) ErrorName() string { + return "HealthCheck_TcpHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_TcpHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_TcpHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_TcpHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_TcpHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_RedisHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_RedisHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_RedisHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_RedisHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_RedisHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_RedisHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + if len(errors) > 0 { + return HealthCheck_RedisHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_RedisHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_RedisHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_RedisHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_RedisHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_RedisHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_RedisHealthCheckValidationError is the validation error returned +// by HealthCheck_RedisHealthCheck.Validate if the designated constraints +// aren't met. 
+type HealthCheck_RedisHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_RedisHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_RedisHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_RedisHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_RedisHealthCheckValidationError) ErrorName() string { + return "HealthCheck_RedisHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_RedisHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_RedisHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_RedisHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_RedisHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_GrpcHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_GrpcHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_GrpcHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_GrpcHealthCheckMultiError, or nil if none found. 
+func (m *HealthCheck_GrpcHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_GrpcHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceName + + if !_HealthCheck_GrpcHealthCheck_Authority_Pattern.MatchString(m.GetAuthority()) { + err := HealthCheck_GrpcHealthCheckValidationError{ + field: "Authority", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetInitialMetadata()) > 1000 { + err := HealthCheck_GrpcHealthCheckValidationError{ + field: "InitialMetadata", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetInitialMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HealthCheck_GrpcHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_GrpcHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_GrpcHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_GrpcHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_GrpcHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_GrpcHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_GrpcHealthCheckValidationError is the validation error returned +// by HealthCheck_GrpcHealthCheck.Validate if the designated constraints +// aren't met. +type HealthCheck_GrpcHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_GrpcHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_GrpcHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_GrpcHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HealthCheck_GrpcHealthCheckValidationError) ErrorName() string { + return "HealthCheck_GrpcHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_GrpcHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_GrpcHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_GrpcHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_GrpcHealthCheckValidationError{} + +var _HealthCheck_GrpcHealthCheck_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HealthCheck_CustomHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_CustomHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_CustomHealthCheck with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HealthCheck_CustomHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_CustomHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_CustomHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HealthCheck_CustomHealthCheckValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *HealthCheck_CustomHealthCheck_TypedConfig: + if v == nil { + err := HealthCheck_CustomHealthCheckValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HealthCheck_CustomHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_CustomHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_CustomHealthCheck.ValidateAll() +// if the designated constraints aren't met. +type HealthCheck_CustomHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HealthCheck_CustomHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_CustomHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_CustomHealthCheckValidationError is the validation error +// returned by HealthCheck_CustomHealthCheck.Validate if the designated +// constraints aren't met. +type HealthCheck_CustomHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_CustomHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_CustomHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_CustomHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_CustomHealthCheckValidationError) ErrorName() string { + return "HealthCheck_CustomHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_CustomHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_CustomHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_CustomHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_CustomHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_TlsOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_TlsOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_TlsOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_TlsOptionsMultiError, or nil if none found. +func (m *HealthCheck_TlsOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_TlsOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HealthCheck_TlsOptionsMultiError(errors) + } + + return nil +} + +// HealthCheck_TlsOptionsMultiError is an error wrapping multiple validation +// errors returned by HealthCheck_TlsOptions.ValidateAll() if the designated +// constraints aren't met. +type HealthCheck_TlsOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_TlsOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HealthCheck_TlsOptionsMultiError) AllErrors() []error { return m } + +// HealthCheck_TlsOptionsValidationError is the validation error returned by +// HealthCheck_TlsOptions.Validate if the designated constraints aren't met. +type HealthCheck_TlsOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_TlsOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_TlsOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_TlsOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_TlsOptionsValidationError) ErrorName() string { + return "HealthCheck_TlsOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_TlsOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_TlsOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_TlsOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_TlsOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go new file mode 100644 index 000000000..892ea5e4e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go @@ -0,0 +1,1384 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HealthStatusSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthStatusSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthStatusSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_Payload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Binary); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Text); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload_Text) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Text) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Text) + copy(dAtA[i:], m.Text) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Text))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HealthCheck_Payload_Binary) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Binary) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Binary) + copy(dAtA[i:], m.Binary) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Binary))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HealthCheck_HttpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { 
+ size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ResponseBufferSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.ResponseBufferSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.Method != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Method)) + i-- + dAtA[i] = 0x68 + } + if len(m.RetriableStatuses) > 0 { + for iNdEx := len(m.RetriableStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetriableStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetriableStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.ServiceNameMatcher != nil { + if vtmsg, ok := interface{}(m.ServiceNameMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServiceNameMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.CodecClientType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecClientType)) + i-- + dAtA[i] = 0x50 + } + if len(m.ExpectedStatuses) > 0 { + for iNdEx := len(m.ExpectedStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ExpectedStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExpectedStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ProxyProtocolConfig != nil { + size, err := m.ProxyProtocolConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.InitialMetadata) > 0 { + for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*HealthCheck_CustomHealthCheck_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TlsOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TlsOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TlsOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AlpnProtocols) > 0 { + 
for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysLogHealthCheckSuccess { + i-- + if m.AlwaysLogHealthCheckSuccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.EventLogger) > 0 { + for iNdEx := len(m.EventLogger) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EventLogger[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.NoTrafficHealthyInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficHealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.TransportSocketMatchCriteria != nil { + size, err := (*structpb.Struct)(m.TransportSocketMatchCriteria).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.EventService != nil { + size, err := m.EventService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.TlsOptions != nil { + size, err := m.TlsOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.InitialJitter != nil { + size, err := (*durationpb.Duration)(m.InitialJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.AlwaysLogHealthCheckFailures { + i-- + if m.AlwaysLogHealthCheckFailures { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.IntervalJitterPercent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntervalJitterPercent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.HealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.HealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.UnhealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UnhealthyInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if msg, ok := m.HealthChecker.(*HealthCheck_CustomHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NoTrafficInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HealthChecker.(*HealthCheck_GrpcHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_TcpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_HttpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ReuseConnection != nil { + size, err := (*wrapperspb.BoolValue)(m.ReuseConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AltPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.AltPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.HealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UnhealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + size, err := (*durationpb.Duration)(m.IntervalJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { 
+ size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpHealthCheck != nil { + size, err := m.HttpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TcpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TcpHealthCheck != nil { + size, err := m.TcpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_GrpcHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcHealthCheck != nil { + size, err := m.GrpcHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_CustomHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomHealthCheck != nil { + size, err := m.CustomHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *HealthStatusSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Payload.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload_Text) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Text) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_Payload_Binary) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Binary) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_HttpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = 
len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExpectedStatuses) > 0 { + for _, e := range m.ExpectedStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.CodecClientType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecClientType)) + } + if m.ServiceNameMatcher != nil { + if size, ok := interface{}(m.ServiceNameMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServiceNameMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableStatuses) > 0 { + for _, e := range m.RetriableStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Method != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Method)) + } + if m.ResponseBufferSize != nil { + l = (*wrapperspb.UInt64Value)(m.ResponseBufferSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_TcpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ProxyProtocolConfig != nil { + l = m.ProxyProtocolConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_RedisHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_GrpcHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = 
(*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TlsOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + l = (*durationpb.Duration)(m.IntervalJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.HealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AltPort != nil { + l = (*wrapperspb.UInt32Value)(m.AltPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReuseConnection != nil { + l = (*wrapperspb.BoolValue)(m.ReuseConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HealthChecker.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.NoTrafficInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyEdgeInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.HealthyEdgeInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EventLogPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitterPercent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.IntervalJitterPercent)) + } + if m.AlwaysLogHealthCheckFailures { + n += 3 + } + if m.InitialJitter != nil { + l = (*durationpb.Duration)(m.InitialJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsOptions != nil { + l = m.TlsOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + l = m.EventService.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketMatchCriteria != nil { + l = (*structpb.Struct)(m.TransportSocketMatchCriteria).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NoTrafficHealthyInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficHealthyInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.EventLogger) > 0 { + for _, e := range m.EventLogger { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysLogHealthCheckSuccess { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_HttpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpHealthCheck != nil { + l = 
m.HttpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TcpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpHealthCheck != nil { + l = m.TcpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_GrpcHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcHealthCheck != nil { + l = m.GrpcHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_CustomHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomHealthCheck != nil { + l = m.CustomHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go new file mode 100644 index 000000000..ec8d54bb7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HTTP service configuration. +type HttpService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The service's HTTP URI. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.myserviceapi.com/v1/data + // cluster: www.myserviceapi.com|443 + HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. + RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,2,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` +} + +func (x *HttpService) Reset() { + *x = HttpService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_http_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpService) ProtoMessage() {} + +func (x *HttpService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_http_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpService.ProtoReflect.Descriptor instead. 
+func (*HttpService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_http_service_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpService) GetHttpUri() *HttpUri { + if x != nil { + return x.HttpUri + } + return nil +} + +func (x *HttpService) GetRequestHeadersToAdd() []*HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +var File_envoy_config_core_v3_http_service_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_http_service_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb0, 0x01, + 0x0a, 0x0b, 0x48, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x38, 0x0a, + 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x52, 0x07, + 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, + 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x10, 0x48, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 
0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_http_service_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_http_service_proto_rawDescData = file_envoy_config_core_v3_http_service_proto_rawDesc +) + +func file_envoy_config_core_v3_http_service_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_http_service_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_http_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_service_proto_rawDescData) + }) + return file_envoy_config_core_v3_http_service_proto_rawDescData +} + +var file_envoy_config_core_v3_http_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_http_service_proto_goTypes = []interface{}{ + (*HttpService)(nil), // 0: envoy.config.core.v3.HttpService + (*HttpUri)(nil), // 1: envoy.config.core.v3.HttpUri + (*HeaderValueOption)(nil), // 2: envoy.config.core.v3.HeaderValueOption +} +var file_envoy_config_core_v3_http_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.HttpService.http_uri:type_name -> envoy.config.core.v3.HttpUri + 2, // 1: envoy.config.core.v3.HttpService.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_http_service_proto_init() } +func file_envoy_config_core_v3_http_service_proto_init() { + if File_envoy_config_core_v3_http_service_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_http_uri_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_http_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_http_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_http_service_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_http_service_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_http_service_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_http_service_proto = out.File + file_envoy_config_core_v3_http_service_proto_rawDesc = nil + file_envoy_config_core_v3_http_service_proto_goTypes = nil + file_envoy_config_core_v3_http_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go new file mode 100644 index 000000000..f1ce3fed0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go @@ -0,0 +1,210 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpServiceMultiError, or +// nil if none found. +func (m *HttpService) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttpUri()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := HttpServiceValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpServiceMultiError(errors) + } + + return nil +} + +// HttpServiceMultiError is an error wrapping multiple validation errors +// returned by HttpService.ValidateAll() if the designated constraints aren't met. 
+type HttpServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpServiceMultiError) AllErrors() []error { return m } + +// HttpServiceValidationError is the validation error returned by +// HttpService.Validate if the designated constraints aren't met. +type HttpServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpServiceValidationError) ErrorName() string { return "HttpServiceValidationError" } + +// Error satisfies the builtin error interface +func (e HttpServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpServiceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go new file mode 100644 index 000000000..64e0ece67 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go new file mode 100644 index 000000000..c1ba4357f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy external URI descriptor +type HttpUri struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // Specify how “uri“ is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. 
+ // + // Types that are assignable to HttpUpstreamType: + // + // *HttpUri_Cluster + HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"` + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *HttpUri) Reset() { + *x = HttpUri{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpUri) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpUri) ProtoMessage() {} + +func (x *HttpUri) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpUri.ProtoReflect.Descriptor instead. +func (*HttpUri) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_http_uri_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpUri) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (m *HttpUri) GetHttpUpstreamType() isHttpUri_HttpUpstreamType { + if m != nil { + return m.HttpUpstreamType + } + return nil +} + +func (x *HttpUri) GetCluster() string { + if x, ok := x.GetHttpUpstreamType().(*HttpUri_Cluster); ok { + return x.Cluster + } + return "" +} + +func (x *HttpUri) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type isHttpUri_HttpUpstreamType interface { + isHttpUri_HttpUpstreamType() +} + +type HttpUri_Cluster struct { + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. 
code-block:: yaml + // + // cluster: jwks_cluster + Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3,oneof"` +} + +func (*HttpUri_Cluster) isHttpUri_HttpUpstreamType() {} + +var File_envoy_config_core_v3_http_uri_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_http_uri_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x07, 0x48, 0x74, 0x74, 0x70, 0x55, + 0x72, 0x69, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x23, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x47, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0xaa, 0x01, 0x0c, 0x08, 0x01, 0x1a, 0x06, 0x08, 0x80, 0x80, 0x80, 0x80, 0x10, + 0x32, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x20, 0x9a, 0xc5, 0x88, + 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x19, 0x0a, + 0x12, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x80, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_http_uri_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_http_uri_proto_rawDescData = file_envoy_config_core_v3_http_uri_proto_rawDesc +) + +func file_envoy_config_core_v3_http_uri_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_http_uri_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_http_uri_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_uri_proto_rawDescData) + }) + return file_envoy_config_core_v3_http_uri_proto_rawDescData +} + +var file_envoy_config_core_v3_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_http_uri_proto_goTypes = []interface{}{ + (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_config_core_v3_http_uri_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.HttpUri.timeout:type_name -> google.protobuf.Duration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_http_uri_proto_init() } +func file_envoy_config_core_v3_http_uri_proto_init() { + if File_envoy_config_core_v3_http_uri_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_http_uri_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpUri); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_http_uri_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HttpUri_Cluster)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_http_uri_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_http_uri_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_http_uri_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_http_uri_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_http_uri_proto = out.File + file_envoy_config_core_v3_http_uri_proto_rawDesc = nil + file_envoy_config_core_v3_http_uri_proto_goTypes = nil + file_envoy_config_core_v3_http_uri_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go new file mode 100644 index 000000000..bbf40c7ce --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go @@ -0,0 +1,228 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpUri with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpUri) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpUri with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in HttpUriMultiError, or nil if none found. +func (m *HttpUri) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpUri) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetUri()) < 1 { + err := HttpUriValidationError{ + field: "Uri", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTimeout() == nil { + err := HttpUriValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpUriValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(4294967296*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte || dur >= lt { + err := HttpUriValidationError{ + field: "Timeout", + reason: "value must be inside range [0s, 1193046h28m16s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + oneofHttpUpstreamTypePresent := false + switch v := m.HttpUpstreamType.(type) { + case *HttpUri_Cluster: + if v == nil { + err := HttpUriValidationError{ + field: "HttpUpstreamType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHttpUpstreamTypePresent = true + + if utf8.RuneCountInString(m.GetCluster()) < 1 { + err := HttpUriValidationError{ + field: "Cluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofHttpUpstreamTypePresent { + err := HttpUriValidationError{ + field: "HttpUpstreamType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpUriMultiError(errors) + } + + return nil +} + +// HttpUriMultiError is an error wrapping multiple validation errors returned +// by HttpUri.ValidateAll() if the designated constraints aren't met. +type HttpUriMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpUriMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpUriMultiError) AllErrors() []error { return m } + +// HttpUriValidationError is the validation error returned by HttpUri.Validate +// if the designated constraints aren't met. +type HttpUriValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpUriValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpUriValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpUriValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpUriValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpUriValidationError) ErrorName() string { return "HttpUriValidationError" } + +// Error satisfies the builtin error interface +func (e HttpUriValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpUri.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpUriValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpUriValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go new file mode 100644 index 000000000..73cd12f13 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go @@ -0,0 +1,123 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpUri) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpUri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.HttpUpstreamType.(*HttpUri_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpUri_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpUri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HttpUpstreamType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpUri_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go new file mode 100644 index 000000000..70be28739 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go @@ -0,0 +1,2453 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Action to take when Envoy receives client request with header names containing underscore +// characters. +// Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented +// as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore +// characters. +type HttpProtocolOptions_HeadersWithUnderscoresAction int32 + +const ( + // Allow headers with underscores. This is the default behavior. + HttpProtocolOptions_ALLOW HttpProtocolOptions_HeadersWithUnderscoresAction = 0 + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + HttpProtocolOptions_REJECT_REQUEST HttpProtocolOptions_HeadersWithUnderscoresAction = 1 + // Drop the client header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + HttpProtocolOptions_DROP_HEADER HttpProtocolOptions_HeadersWithUnderscoresAction = 2 +) + +// Enum value maps for HttpProtocolOptions_HeadersWithUnderscoresAction. +var ( + HttpProtocolOptions_HeadersWithUnderscoresAction_name = map[int32]string{ + 0: "ALLOW", + 1: "REJECT_REQUEST", + 2: "DROP_HEADER", + } + HttpProtocolOptions_HeadersWithUnderscoresAction_value = map[string]int32{ + "ALLOW": 0, + "REJECT_REQUEST": 1, + "DROP_HEADER": 2, + } +) + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Enum() *HttpProtocolOptions_HeadersWithUnderscoresAction { + p := new(HttpProtocolOptions_HeadersWithUnderscoresAction) + *p = x + return p +} + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpProtocolOptions_HeadersWithUnderscoresAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_protocol_proto_enumTypes[0].Descriptor() +} + +func (HttpProtocolOptions_HeadersWithUnderscoresAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_protocol_proto_enumTypes[0] +} + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpProtocolOptions_HeadersWithUnderscoresAction.Descriptor instead. 
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5, 0} +} + +// [#not-implemented-hide:] +type TcpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *TcpProtocolOptions) Reset() { + *x = TcpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TcpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TcpProtocolOptions) ProtoMessage() {} + +func (x *TcpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TcpProtocolOptions.ProtoReflect.Descriptor instead. +func (*TcpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{0} +} + +// Config for keepalive probes in a QUIC connection. +// Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet +// itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. +type QuicKeepAliveSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes. + // + // If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + // + // If zero, disable keepalive probing. + // If absent, use the QUICHE default interval to probe. + MaxInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + // + // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + // + // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. 
+ InitialInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_interval,json=initialInterval,proto3" json:"initial_interval,omitempty"` +} + +func (x *QuicKeepAliveSettings) Reset() { + *x = QuicKeepAliveSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicKeepAliveSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicKeepAliveSettings) ProtoMessage() {} + +func (x *QuicKeepAliveSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicKeepAliveSettings.ProtoReflect.Descriptor instead. +func (*QuicKeepAliveSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{1} +} + +func (x *QuicKeepAliveSettings) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration { + if x != nil { + return x.InitialInterval + } + return nil +} + +// QUIC protocol options which apply to both downstream and upstream connections. +// [#next-free-field: 9] +type QuicProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + // `Initial stream-level flow-control receive window + // `_ size. Valid values range from + // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024). + // + // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. + // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the stream buffers. + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + // Similar to “initial_stream_window_size“, but for connection-level + // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults + // to 25165824 (24 * 1024 * 1024). + // + // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default + // window size now, so it's also the minimum. + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + // The number of timeouts that can occur before port migration is triggered for QUIC clients. 
+ // This defaults to 4. If set to 0, port migration will not occur on path degrading. + // Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. + // This has no effect on server sessions. + NumTimeoutsToTriggerPortMigration *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=num_timeouts_to_trigger_port_migration,json=numTimeoutsToTriggerPortMigration,proto3" json:"num_timeouts_to_trigger_port_migration,omitempty"` + // Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. + // If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. + ConnectionKeepalive *QuicKeepAliveSettings `protobuf:"bytes,5,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` + // A comma-separated list of strings representing QUIC connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + ConnectionOptions string `protobuf:"bytes,6,opt,name=connection_options,json=connectionOptions,proto3" json:"connection_options,omitempty"` + // A comma-separated list of strings representing QUIC client connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + ClientConnectionOptions string `protobuf:"bytes,7,opt,name=client_connection_options,json=clientConnectionOptions,proto3" json:"client_connection_options,omitempty"` + // The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE + // default 600s will be applied. + // For internal corporate network, a long timeout is often fine. + // But for client facing network, 30s is usually a good choice. + IdleNetworkTimeout *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_network_timeout,json=idleNetworkTimeout,proto3" json:"idle_network_timeout,omitempty"` +} + +func (x *QuicProtocolOptions) Reset() { + *x = QuicProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicProtocolOptions) ProtoMessage() {} + +func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead. 
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{2} +} + +func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConcurrentStreams + } + return nil +} + +func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialStreamWindowSize + } + return nil +} + +func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialConnectionWindowSize + } + return nil +} + +func (x *QuicProtocolOptions) GetNumTimeoutsToTriggerPortMigration() *wrapperspb.UInt32Value { + if x != nil { + return x.NumTimeoutsToTriggerPortMigration + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionKeepalive() *QuicKeepAliveSettings { + if x != nil { + return x.ConnectionKeepalive + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionOptions() string { + if x != nil { + return x.ConnectionOptions + } + return "" +} + +func (x *QuicProtocolOptions) GetClientConnectionOptions() string { + if x != nil { + return x.ClientConnectionOptions + } + return "" +} + +func (x *QuicProtocolOptions) GetIdleNetworkTimeout() *durationpb.Duration { + if x != nil { + return x.IdleNetworkTimeout + } + return nil +} + +type UpstreamHttpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header or any other arbitrary + // header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"` + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // This field is intended to be set with “auto_sni“ field. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"` + // An optional alternative to the host/authority header to be used for setting the SNI value. + // It should be a valid downstream HTTP header, as seen by the + // :ref:`router filter `. + // If unset, host/authority header will be used for populating the SNI. If the specified header + // is not found or the value is empty, host/authority header will be used instead. + // This field is intended to be set with “auto_sni“ and/or “auto_san_validation“ fields. + // If none of these fields are set then setting this would be a no-op. + // Does nothing if a filter before the http router filter sets the corresponding metadata. 
+ OverrideAutoSniHeader string `protobuf:"bytes,3,opt,name=override_auto_sni_header,json=overrideAutoSniHeader,proto3" json:"override_auto_sni_header,omitempty"` +} + +func (x *UpstreamHttpProtocolOptions) Reset() { + *x = UpstreamHttpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamHttpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamHttpProtocolOptions) ProtoMessage() {} + +func (x *UpstreamHttpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamHttpProtocolOptions.ProtoReflect.Descriptor instead. +func (*UpstreamHttpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{3} +} + +func (x *UpstreamHttpProtocolOptions) GetAutoSni() bool { + if x != nil { + return x.AutoSni + } + return false +} + +func (x *UpstreamHttpProtocolOptions) GetAutoSanValidation() bool { + if x != nil { + return x.AutoSanValidation + } + return false +} + +func (x *UpstreamHttpProtocolOptions) GetOverrideAutoSniHeader() string { + if x != nil { + return x.OverrideAutoSniHeader + } + return "" +} + +// Configures the alternate protocols cache which tracks alternate protocols that can be used to +// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for +// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 +// for the "HTTPS" DNS resource record. +// [#next-free-field: 6] +type AlternateProtocolsCacheOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cache. Multiple named caches allow independent alternate protocols cache + // configurations to operate within a single Envoy process using different configurations. All + // alternate protocols cache options with the same name *must* be equal in all fields when + // referenced from different configuration components. Configuration will fail to load if this is + // not the case. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of entries that the cache will hold. If not specified defaults to 1024. + // + // .. note: + // + // The implementation is approximate and enforced independently on each worker thread, thus + // it is possible for the maximum entries in the cache to go slightly above the configured + // value depending on timing. This is similar to how other circuit breakers work. + MaxEntries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // Allows configuring a persistent + // :ref:`key value store ` to flush + // alternate protocols entries to disk. + // This function is currently only supported if concurrency is 1 + // Cached entries will take precedence over pre-populated entries below. 
+ KeyValueStoreConfig *TypedExtensionConfig `protobuf:"bytes,3,opt,name=key_value_store_config,json=keyValueStoreConfig,proto3" json:"key_value_store_config,omitempty"` + // Allows pre-populating the cache with entries, as described above. + PrepopulatedEntries []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry `protobuf:"bytes,4,rep,name=prepopulated_entries,json=prepopulatedEntries,proto3" json:"prepopulated_entries,omitempty"` + // Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + // this list contained the value “.c.example.com“, then an Alt-Svc entry for “foo.c.example.com“ + // could be shared with “bar.c.example.com“ but would not be shared with “baz.example.com“. On + // the other hand, if the list contained the value “.example.com“ then all three hosts could share + // Alt-Svc entries. Each entry must start with “.“. If a hostname matches multiple suffixes, the + // first listed suffix will be used. + // + // Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. + // [#not-implemented-hide:] + CanonicalSuffixes []string `protobuf:"bytes,5,rep,name=canonical_suffixes,json=canonicalSuffixes,proto3" json:"canonical_suffixes,omitempty"` +} + +func (x *AlternateProtocolsCacheOptions) Reset() { + *x = AlternateProtocolsCacheOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlternateProtocolsCacheOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlternateProtocolsCacheOptions) ProtoMessage() {} + +func (x *AlternateProtocolsCacheOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlternateProtocolsCacheOptions.ProtoReflect.Descriptor instead. +func (*AlternateProtocolsCacheOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4} +} + +func (x *AlternateProtocolsCacheOptions) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxEntries + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetKeyValueStoreConfig() *TypedExtensionConfig { + if x != nil { + return x.KeyValueStoreConfig + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetPrepopulatedEntries() []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry { + if x != nil { + return x.PrepopulatedEntries + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string { + if x != nil { + return x.CanonicalSuffixes + } + return nil +} + +// [#next-free-field: 7] +type HttpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The idle timeout for connections. The idle timeout is defined as the + // period in which there are no active requests. When the + // idle timeout is reached the connection will be closed. 
If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + // + // .. warning:: + // + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled for downstream connections according to the value for + // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. + IdleTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached, + // the drain sequence will kick-in. The connection will be closed after the drain timeout period + // if there are no active streams. See :ref:`drain_timeout + // `. + MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` + // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + // reset independent of any other timeouts. If not specified, this value is not set. + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. + // Note: this only affects client headers. It does not affect headers added + // by Envoy filters and does not have any impact if added to cluster config. + HeadersWithUnderscoresAction HttpProtocolOptions_HeadersWithUnderscoresAction `protobuf:"varint,5,opt,name=headers_with_underscores_action,json=headersWithUnderscoresAction,proto3,enum=envoy.config.core.v3.HttpProtocolOptions_HeadersWithUnderscoresAction" json:"headers_with_underscores_action,omitempty"` + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` +} + +func (x *HttpProtocolOptions) Reset() { + *x = HttpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpProtocolOptions) ProtoMessage() {} + +func (x *HttpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpProtocolOptions.ProtoReflect.Descriptor instead. +func (*HttpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5} +} + +func (x *HttpProtocolOptions) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxConnectionDuration() *durationpb.Duration { + if x != nil { + return x.MaxConnectionDuration + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxHeadersCount + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOptions_HeadersWithUnderscoresAction { + if x != nil { + return x.HeadersWithUnderscoresAction + } + return HttpProtocolOptions_ALLOW +} + +func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestsPerConnection + } + return nil +} + +// [#next-free-field: 11] +type Http1ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // “http_proxy“ environment variable. + AllowAbsoluteUrl *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // “default_host_for_http_10“ is configured. + AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"` + // A default host for HTTP/1.0 requests. This is highly suggested if “accept_http_10“ is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if “accept_http_10“ is not true. 
+ DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"` + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat *Http1ProtocolOptions_HeaderKeyFormat `protobuf:"bytes,4,opt,name=header_key_format,json=headerKeyFormat,proto3" json:"header_key_format,omitempty"` + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // Note that this only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a HEAD only request nor a HTTP Upgrade. + // - Not a response to a HEAD request. + // - The content length header is not present. + EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"` + // Allows Envoy to process requests/responses with both “Content-Length“ and “Transfer-Encoding“ + // headers set. By default such messages are rejected, but if option is enabled - Envoy will + // remove Content-Length header and process message. + // See `RFC7230, sec. 3.3.3 `_ for details. + // + // .. attention:: + // + // Enabling this option might lead to request smuggling vulnerability, especially if traffic + // is proxied via multiple layers of proxies. + // + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + AllowChunkedLength bool `protobuf:"varint,6,opt,name=allow_chunked_length,json=allowChunkedLength,proto3" json:"allow_chunked_length,omitempty"` + // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate + // HTTP/1.1 connections upon receiving an invalid HTTP message. However, + // when this option is true, then Envoy will leave the HTTP/1.1 connection + // open where possible. + // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // Allows sending fully qualified URLs when proxying the first line of the + // response. By default, Envoy will only send the path components in the first line. + // If this is true, Envoy will create a fully qualified URI composing scheme + // (inferred if not present), host (from the host/:authority header) and path + // (from first line or :path header). + SendFullyQualifiedUrl bool `protobuf:"varint,8,opt,name=send_fully_qualified_url,json=sendFullyQualifiedUrl,proto3" json:"send_fully_qualified_url,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + // If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + // If unset, HTTP/1 parser is selected based on + // envoy.reloadable_features.http1_use_balsa_parser. + // See issue #21245. + UseBalsaParser *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed. 
+ // If true, and BalsaParser is used (either `use_balsa_parser` above is true, + // or `envoy.reloadable_features.http1_use_balsa_parser` is true and + // `use_balsa_parser` is unset), then every non-empty method with only valid + // characters is accepted. Otherwise, methods not on the hard-coded list are + // rejected. + // Once UHV is enabled, this field should be removed, and BalsaParser should + // allow any method. UHV validates the method, rejecting empty string or + // invalid characters, and provides :ref:`restrict_http_methods + // ` + // to reject custom methods. + AllowCustomMethods bool `protobuf:"varint,10,opt,name=allow_custom_methods,json=allowCustomMethods,proto3" json:"allow_custom_methods,omitempty"` +} + +func (x *Http1ProtocolOptions) Reset() { + *x = Http1ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions) ProtoMessage() {} + +func (x *Http1ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6} +} + +func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrapperspb.BoolValue { + if x != nil { + return x.AllowAbsoluteUrl + } + return nil +} + +func (x *Http1ProtocolOptions) GetAcceptHttp_10() bool { + if x != nil { + return x.AcceptHttp_10 + } + return false +} + +func (x *Http1ProtocolOptions) GetDefaultHostForHttp_10() string { + if x != nil { + return x.DefaultHostForHttp_10 + } + return "" +} + +func (x *Http1ProtocolOptions) GetHeaderKeyFormat() *Http1ProtocolOptions_HeaderKeyFormat { + if x != nil { + return x.HeaderKeyFormat + } + return nil +} + +func (x *Http1ProtocolOptions) GetEnableTrailers() bool { + if x != nil { + return x.EnableTrailers + } + return false +} + +func (x *Http1ProtocolOptions) GetAllowChunkedLength() bool { + if x != nil { + return x.AllowChunkedLength + } + return false +} + +func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http1ProtocolOptions) GetSendFullyQualifiedUrl() bool { + if x != nil { + return x.SendFullyQualifiedUrl + } + return false +} + +func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrapperspb.BoolValue { + if x != nil { + return x.UseBalsaParser + } + return nil +} + +func (x *Http1ProtocolOptions) GetAllowCustomMethods() bool { + if x != nil { + return x.AllowCustomMethods + } + return false +} + +type KeepaliveSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + // If this is zero, interval PINGs will not be sent. 
+ Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. Note that in order to prevent the influence of + // Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on + // the connection, under the assumption that if a frame is received the connection is healthy. + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + IntervalJitter *v3.Percent `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` + // If the connection has been idle for this duration, send a HTTP/2 ping ahead + // of new stream creation, to quickly detect dead connections. + // If this is zero, this type of PING will not be sent. + // If an interval ping is outstanding, a second ping will not be sent as the + // interval ping will determine if the connection is dead. + // + // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. + ConnectionIdleInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=connection_idle_interval,json=connectionIdleInterval,proto3" json:"connection_idle_interval,omitempty"` +} + +func (x *KeepaliveSettings) Reset() { + *x = KeepaliveSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepaliveSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepaliveSettings) ProtoMessage() {} + +func (x *KeepaliveSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepaliveSettings.ProtoReflect.Descriptor instead. +func (*KeepaliveSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7} +} + +func (x *KeepaliveSettings) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *KeepaliveSettings) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *KeepaliveSettings) GetIntervalJitter() *v3.Percent { + if x != nil { + return x.IntervalJitter + } + return nil +} + +func (x *KeepaliveSettings) GetConnectionIdleInterval() *durationpb.Duration { + if x != nil { + return x.ConnectionIdleInterval + } + return nil +} + +// [#next-free-field: 17] +type Http2ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. 
+ HpackTableSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"` + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). + // + // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given + // connection based on upstream settings. Config dumps will reflect the configured upper bound, + // not the per-connection negotiated limits. + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + // Similar to “initial_stream_window_size“, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as “initial_stream_window_size“. + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + // Allows proxying Websocket and other upgrades over H2 connect. + AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"` + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/2 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). Exceeding this limit triggers flood mitigation and connection is + // terminated. The “http2.outbound_flood“ stat tracks the number of terminated connections due + // to flood mitigation. The default limit is 10000. + MaxOutboundFrames *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"` + // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, + // preventing high memory utilization when receiving continuous stream of these frames. 
Exceeding + // this limit triggers flood mitigation and connection is terminated. The + // “http2.outbound_control_flood“ stat tracks the number of terminated connections due to flood + // mitigation. The default limit is 1000. + MaxOutboundControlFrames *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"` + // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood“ + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to 0 will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is 1. + MaxConsecutiveInboundFramesWithEmptyPayload *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"` + // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number + // of PRIORITY frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) + // + // the connection is terminated. For downstream connections the “opened_streams“ is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connection the + // “opened_streams“ is incremented when Envoy send the HEADERS frame for a new stream. The + // “http2.inbound_priority_frames_flood“ stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 100. + MaxInboundPriorityFramesPerStream *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` + // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number + // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // 5 + 2 * (``opened_streams`` + + // ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``) + // + // the connection is terminated. For downstream connections the “opened_streams“ is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connections the + // “opened_streams“ is incremented when Envoy sends the HEADERS frame for a new stream. The + // “http2.inbound_priority_frames_flood“ stat tracks the number of connections terminated due to + // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. + // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least 2. 
+ MaxInboundWindowUpdateFramesPerDataFrameSent *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/protocol.proto. + StreamErrorOnInvalidHttpMessaging bool `protobuf:"varint,12,opt,name=stream_error_on_invalid_http_messaging,json=streamErrorOnInvalidHttpMessaging,proto3" json:"stream_error_on_invalid_http_messaging,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // [#not-implemented-hide:] + // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: + // + // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by + // Envoy. + // + // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field + // 'allow_connect'. + // + // Note that custom parameters specified through this field can not also be set in the + // corresponding named parameters: + // + // .. code-block:: text + // + // ID Field Name + // ---------------- + // 0x1 hpack_table_size + // 0x3 max_concurrent_streams + // 0x4 initial_stream_window_size + // + // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies + // between custom parameters with the same identifier will trigger a failure. + // + // See `IANA HTTP/2 Settings + // `_ for + // standardized identifiers. + CustomSettingsParameters []*Http2ProtocolOptions_SettingsParameter `protobuf:"bytes,13,rep,name=custom_settings_parameters,json=customSettingsParameters,proto3" json:"custom_settings_parameters,omitempty"` + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + ConnectionKeepalive *KeepaliveSettings `protobuf:"bytes,15,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` + // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. 
+ UseOghttp2Codec *wrapperspb.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"` +} + +func (x *Http2ProtocolOptions) Reset() { + *x = Http2ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http2ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http2ProtocolOptions) ProtoMessage() {} + +func (x *Http2ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http2ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8} +} + +func (x *Http2ProtocolOptions) GetHpackTableSize() *wrapperspb.UInt32Value { + if x != nil { + return x.HpackTableSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConcurrentStreams + } + return nil +} + +func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialStreamWindowSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialConnectionWindowSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetAllowConnect() bool { + if x != nil { + return x.AllowConnect + } + return false +} + +func (x *Http2ProtocolOptions) GetAllowMetadata() bool { + if x != nil { + return x.AllowMetadata + } + return false +} + +func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxOutboundFrames + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxOutboundControlFrames + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConsecutiveInboundFramesWithEmptyPayload + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInboundPriorityFramesPerStream + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInboundWindowUpdateFramesPerDataFrameSent + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/protocol.proto. 
+func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool { + if x != nil { + return x.StreamErrorOnInvalidHttpMessaging + } + return false +} + +func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http2ProtocolOptions) GetCustomSettingsParameters() []*Http2ProtocolOptions_SettingsParameter { + if x != nil { + return x.CustomSettingsParameters + } + return nil +} + +func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings { + if x != nil { + return x.ConnectionKeepalive + } + return nil +} + +func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrapperspb.BoolValue { + if x != nil { + return x.UseOghttp2Codec + } + return nil +} + +// [#not-implemented-hide:] +type GrpcProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Http2ProtocolOptions *Http2ProtocolOptions `protobuf:"bytes,1,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` +} + +func (x *GrpcProtocolOptions) Reset() { + *x = GrpcProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcProtocolOptions) ProtoMessage() {} + +func (x *GrpcProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcProtocolOptions.ProtoReflect.Descriptor instead. +func (*GrpcProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{9} +} + +func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +// A message which allows using HTTP/3. +// [#next-free-field: 7] +type Http3ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuicProtocolOptions *QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + // the header mechanisms from the `HTTP/2 extended connect RFC + // `_ + // and settings `proposed for HTTP/3 + // `_ + // Note that HTTP/3 CONNECT is not yet an RFC. 
+ AllowExtendedConnect bool `protobuf:"varint,5,opt,name=allow_extended_connect,json=allowExtendedConnect,proto3" json:"allow_extended_connect,omitempty"` + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/3 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` +} + +func (x *Http3ProtocolOptions) Reset() { + *x = Http3ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http3ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http3ProtocolOptions) ProtoMessage() {} + +func (x *Http3ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http3ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http3ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{10} +} + +func (x *Http3ProtocolOptions) GetQuicProtocolOptions() *QuicProtocolOptions { + if x != nil { + return x.QuicProtocolOptions + } + return nil +} + +func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http3ProtocolOptions) GetAllowExtendedConnect() bool { + if x != nil { + return x.AllowExtendedConnect + } + return false +} + +func (x *Http3ProtocolOptions) GetAllowMetadata() bool { + if x != nil { + return x.AllowMetadata + } + return false +} + +// A message to control transformations to the :scheme header +type SchemeHeaderTransformation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Transformation: + // + // *SchemeHeaderTransformation_SchemeToOverwrite + Transformation isSchemeHeaderTransformation_Transformation `protobuf_oneof:"transformation"` + // Set the Scheme header to match the upstream transport protocol. For example, should a + // request be sent to the upstream over TLS, the scheme header will be set to "https". Should the + // request be sent over plaintext, the scheme header will be set to "http". + // If scheme_to_overwrite is set, this field is not used. 
+ MatchUpstream bool `protobuf:"varint,2,opt,name=match_upstream,json=matchUpstream,proto3" json:"match_upstream,omitempty"` +} + +func (x *SchemeHeaderTransformation) Reset() { + *x = SchemeHeaderTransformation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemeHeaderTransformation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemeHeaderTransformation) ProtoMessage() {} + +func (x *SchemeHeaderTransformation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemeHeaderTransformation.ProtoReflect.Descriptor instead. +func (*SchemeHeaderTransformation) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{11} +} + +func (m *SchemeHeaderTransformation) GetTransformation() isSchemeHeaderTransformation_Transformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (x *SchemeHeaderTransformation) GetSchemeToOverwrite() string { + if x, ok := x.GetTransformation().(*SchemeHeaderTransformation_SchemeToOverwrite); ok { + return x.SchemeToOverwrite + } + return "" +} + +func (x *SchemeHeaderTransformation) GetMatchUpstream() bool { + if x != nil { + return x.MatchUpstream + } + return false +} + +type isSchemeHeaderTransformation_Transformation interface { + isSchemeHeaderTransformation_Transformation() +} + +type SchemeHeaderTransformation_SchemeToOverwrite struct { + // Overwrite any Scheme header with the contents of this string. + // If set, takes precedence over match_upstream. + SchemeToOverwrite string `protobuf:"bytes,1,opt,name=scheme_to_overwrite,json=schemeToOverwrite,proto3,oneof"` +} + +func (*SchemeHeaderTransformation_SchemeToOverwrite) isSchemeHeaderTransformation_Transformation() {} + +// Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime. +// This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not +// advertised HTTP/3 support. These entries will be overwritten by alt-svc +// response headers or cached values. +// As with regular cached entries, if the origin response would result in clearing an existing +// alternate protocol cache entry, pre-populated entries will also be cleared. +// Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting +// response headers +// alt-svc: h3=:"123"; ma=86400" in a response to a request to foo.com:123 +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The host name for the alternate protocol entry. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + // The port for the alternate protocol entry. 
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Reset() { + *x = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoMessage() {} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ProtoReflect.Descriptor instead. +func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +// [#next-free-field: 9] +type Http1ProtocolOptions_HeaderKeyFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to HeaderFormat: + // + // *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ + // *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter + HeaderFormat isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat `protobuf_oneof:"header_format"` +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) Reset() { + *x = Http1ProtocolOptions_HeaderKeyFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions_HeaderKeyFormat) ProtoMessage() {} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat.ProtoReflect.Descriptor instead. 
+func (*Http1ProtocolOptions_HeaderKeyFormat) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0} +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) GetHeaderFormat() isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat { + if m != nil { + return m.HeaderFormat + } + return nil +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) GetProperCaseWords() *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords { + if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { + return x.ProperCaseWords + } + return nil +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) GetStatefulFormatter() *TypedExtensionConfig { + if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok { + return x.StatefulFormatter + } + return nil +} + +type isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat interface { + isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() +} + +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ struct { + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. For example, + // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + // Note that while this results in most headers following conventional casing, certain headers + // are not covered. For example, the "TE" header will be formatted as "Te". + ProperCaseWords *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords `protobuf:"bytes,1,opt,name=proper_case_words,json=properCaseWords,proto3,oneof"` +} + +type Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter struct { + // Configuration for stateful formatter extensions that allow using received headers to + // affect the output of encoding headers. E.g., preserving case during proxying. + // [#extension-category: envoy.http.stateful_header_formatters] + StatefulFormatter *TypedExtensionConfig `protobuf:"bytes,8,opt,name=stateful_formatter,json=statefulFormatter,proto3,oneof"` +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() { +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() { +} + +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Reset() { + *x = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoMessage() {} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ProtoReflect.Descriptor instead. 
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0, 0} +} + +// Defines a parameter to be sent in the SETTINGS frame. +// See `RFC7540, sec. 6.5.1 `_ for details. +type Http2ProtocolOptions_SettingsParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The 16 bit parameter identifier. + Identifier *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // The 32 bit parameter value. + Value *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Http2ProtocolOptions_SettingsParameter) Reset() { + *x = Http2ProtocolOptions_SettingsParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http2ProtocolOptions_SettingsParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http2ProtocolOptions_SettingsParameter) ProtoMessage() {} + +func (x *Http2ProtocolOptions_SettingsParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http2ProtocolOptions_SettingsParameter.ProtoReflect.Descriptor instead. +func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrapperspb.UInt32Value { + if x != nil { + return x.Identifier + } + return nil +} + +func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrapperspb.UInt32Value { + if x != nil { + return x.Value + } + return nil +} + +var File_envoy_config_core_v3_protocol_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x2f, 0x73, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x41, 0x0a, 0x12, 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x15, 0x51, 0x75, 0x69, 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41, + 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4a, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xf1, 0x05, 0x0a, + 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x12, 0x67, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x0c, 0xfa, 0x42, 0x09, 
0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x08, 0x28, + 0x01, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6f, 0x0a, 0x1e, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x0c, 0x28, 0x01, 0x52, 0x1b, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x7a, 0x0a, 0x26, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, + 0x18, 0x05, 0x28, 0x00, 0x52, 0x21, 0x6e, 0x75, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x73, 0x54, 0x6f, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, + 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x0c, + 0xaa, 0x01, 0x09, 0x22, 0x03, 0x08, 0xd8, 0x04, 0x32, 0x02, 0x08, 0x01, 0x52, 0x12, 0x69, 0x64, + 0x6c, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x48, 0x74, 0x74, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, + 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x18, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x72, 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 
0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, + 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 
0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, + 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 
0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, + 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, + 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, + 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, + 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, + 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, + 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, + 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, + 
0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, 0x0e, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, + 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, + 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, + 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, + 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 
0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, + 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, + 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, + 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, + 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, 0x0a, 0x11, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, 0x06, 0x18, + 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, + 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, + 
0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x10, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, + 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_protocol_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_protocol_proto_rawDescData = file_envoy_config_core_v3_protocol_proto_rawDesc +) + +func file_envoy_config_core_v3_protocol_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_protocol_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_protocol_proto_rawDescData) + }) + return file_envoy_config_core_v3_protocol_proto_rawDescData +} + +var file_envoy_config_core_v3_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{ + (HttpProtocolOptions_HeadersWithUnderscoresAction)(0), // 0: envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + (*TcpProtocolOptions)(nil), // 1: envoy.config.core.v3.TcpProtocolOptions + (*QuicKeepAliveSettings)(nil), // 2: envoy.config.core.v3.QuicKeepAliveSettings + (*QuicProtocolOptions)(nil), // 3: 
envoy.config.core.v3.QuicProtocolOptions + (*UpstreamHttpProtocolOptions)(nil), // 4: envoy.config.core.v3.UpstreamHttpProtocolOptions + (*AlternateProtocolsCacheOptions)(nil), // 5: envoy.config.core.v3.AlternateProtocolsCacheOptions + (*HttpProtocolOptions)(nil), // 6: envoy.config.core.v3.HttpProtocolOptions + (*Http1ProtocolOptions)(nil), // 7: envoy.config.core.v3.Http1ProtocolOptions + (*KeepaliveSettings)(nil), // 8: envoy.config.core.v3.KeepaliveSettings + (*Http2ProtocolOptions)(nil), // 9: envoy.config.core.v3.Http2ProtocolOptions + (*GrpcProtocolOptions)(nil), // 10: envoy.config.core.v3.GrpcProtocolOptions + (*Http3ProtocolOptions)(nil), // 11: envoy.config.core.v3.Http3ProtocolOptions + (*SchemeHeaderTransformation)(nil), // 12: envoy.config.core.v3.SchemeHeaderTransformation + (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry)(nil), // 13: envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 14: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 15: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value + (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*v3.Percent)(nil), // 21: envoy.type.v3.Percent +} +var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ + 17, // 0: envoy.config.core.v3.QuicKeepAliveSettings.max_interval:type_name -> google.protobuf.Duration + 17, // 1: envoy.config.core.v3.QuicKeepAliveSettings.initial_interval:type_name -> google.protobuf.Duration + 18, // 2: envoy.config.core.v3.QuicProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 3: envoy.config.core.v3.QuicProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 4: envoy.config.core.v3.QuicProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value + 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings + 17, // 7: envoy.config.core.v3.QuicProtocolOptions.idle_network_timeout:type_name -> google.protobuf.Duration + 18, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value + 19, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + 17, // 11: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 17, // 12: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration + 18, // 13: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value + 17, // 14: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> 
google.protobuf.Duration + 0, // 15: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + 18, // 16: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 20, // 17: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue + 14, // 18: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 20, // 20: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue + 17, // 21: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration + 17, // 22: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration + 21, // 23: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent + 17, // 24: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration + 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value + 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value + 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value + 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value + 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value + 18, // 33: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value + 20, // 34: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 16, // 35: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + 8, // 36: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings + 20, // 37: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue + 9, // 38: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 3, // 39: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 20, // 40: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 15, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + 19, // 42: 
envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value + 18, // 44: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value + 45, // [45:45] is the sub-list for method output_type + 45, // [45:45] is the sub-list for method input_type + 45, // [45:45] is the sub-list for extension type_name + 45, // [45:45] is the sub-list for extension extendee + 0, // [0:45] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_protocol_proto_init() } +func file_envoy_config_core_v3_protocol_proto_init() { + if File_envoy_config_core_v3_protocol_proto != nil { + return + } + file_envoy_config_core_v3_extension_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TcpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicKeepAliveSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamHttpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlternateProtocolsCacheOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepaliveSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http2ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*GrpcProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http3ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemeHeaderTransformation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http2ProtocolOptions_SettingsParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SchemeHeaderTransformation_SchemeToOverwrite)(nil), + } + file_envoy_config_core_v3_protocol_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_)(nil), + (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_protocol_proto_rawDesc, + NumEnums: 1, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_protocol_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_protocol_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_protocol_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_protocol_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_protocol_proto = out.File + file_envoy_config_core_v3_protocol_proto_rawDesc = nil + file_envoy_config_core_v3_protocol_proto_goTypes = nil + file_envoy_config_core_v3_protocol_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go new file mode 100644 index 000000000..d0d0be643 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go @@ -0,0 +1,3015 @@ +//go:build !disable_pgv +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TcpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TcpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TcpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TcpProtocolOptionsMultiError, or nil if none found. +func (m *TcpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *TcpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return TcpProtocolOptionsMultiError(errors) + } + + return nil +} + +// TcpProtocolOptionsMultiError is an error wrapping multiple validation errors +// returned by TcpProtocolOptions.ValidateAll() if the designated constraints +// aren't met. +type TcpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TcpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TcpProtocolOptionsMultiError) AllErrors() []error { return m } + +// TcpProtocolOptionsValidationError is the validation error returned by +// TcpProtocolOptions.Validate if the designated constraints aren't met. +type TcpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TcpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TcpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TcpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TcpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TcpProtocolOptionsValidationError) ErrorName() string { + return "TcpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e TcpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTcpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TcpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TcpProtocolOptionsValidationError{} + +// Validate checks the field values on QuicKeepAliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicKeepAliveSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicKeepAliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicKeepAliveSettingsMultiError, or nil if none found. +func (m *QuicKeepAliveSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicKeepAliveSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicKeepAliveSettingsValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(0*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur > lte && dur < gte { + err := QuicKeepAliveSettingsValidationError{ + field: "MaxInterval", + reason: "value must be outside range (0s, 1s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetInitialInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicKeepAliveSettingsValidationError{ + field: "InitialInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(0*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur > lte && dur < gte { + err := QuicKeepAliveSettingsValidationError{ + field: "InitialInterval", + reason: "value must be outside range (0s, 1s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return QuicKeepAliveSettingsMultiError(errors) + } + + return nil +} + +// QuicKeepAliveSettingsMultiError is an error wrapping multiple validation +// errors returned by QuicKeepAliveSettings.ValidateAll() if the designated +// constraints aren't met. +type QuicKeepAliveSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicKeepAliveSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m QuicKeepAliveSettingsMultiError) AllErrors() []error { return m } + +// QuicKeepAliveSettingsValidationError is the validation error returned by +// QuicKeepAliveSettings.Validate if the designated constraints aren't met. +type QuicKeepAliveSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuicKeepAliveSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicKeepAliveSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicKeepAliveSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicKeepAliveSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicKeepAliveSettingsValidationError) ErrorName() string { + return "QuicKeepAliveSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicKeepAliveSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicKeepAliveSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicKeepAliveSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicKeepAliveSettingsValidationError{} + +// Validate checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicProtocolOptionsMultiError, or nil if none found. 
+func (m *QuicProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := QuicProtocolOptionsValidationError{ + field: "MaxConcurrentStreams", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 16777216 { + err := QuicProtocolOptionsValidationError{ + field: "InitialStreamWindowSize", + reason: "value must be inside range [1, 16777216]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 25165824 { + err := QuicProtocolOptionsValidationError{ + field: "InitialConnectionWindowSize", + reason: "value must be inside range [1, 25165824]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetNumTimeoutsToTriggerPortMigration(); wrapper != nil { + + if val := wrapper.GetValue(); val < 0 || val > 5 { + err := QuicProtocolOptionsValidationError{ + field: "NumTimeoutsToTriggerPortMigration", + reason: "value must be inside range [0, 5]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetConnectionKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConnectionOptions + + // no validation rules for ClientConnectionOptions + + if d := m.GetIdleNetworkTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(600*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur < gte || dur > lte { + err := QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value must be inside range [1s, 10m0s]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return QuicProtocolOptionsMultiError(errors) + } + + return nil +} + +// QuicProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by QuicProtocolOptions.ValidateAll() if the designated +// constraints aren't met. 
+type QuicProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QuicProtocolOptionsMultiError) AllErrors() []error { return m } + +// QuicProtocolOptionsValidationError is the validation error returned by +// QuicProtocolOptions.Validate if the designated constraints aren't met. +type QuicProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuicProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicProtocolOptionsValidationError) ErrorName() string { + return "QuicProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicProtocolOptionsValidationError{} + +// Validate checks the field values on UpstreamHttpProtocolOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamHttpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamHttpProtocolOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamHttpProtocolOptionsMultiError, or nil if none found. +func (m *UpstreamHttpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamHttpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AutoSni + + // no validation rules for AutoSanValidation + + if m.GetOverrideAutoSniHeader() != "" { + + if !_UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern.MatchString(m.GetOverrideAutoSniHeader()) { + err := UpstreamHttpProtocolOptionsValidationError{ + field: "OverrideAutoSniHeader", + reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return UpstreamHttpProtocolOptionsMultiError(errors) + } + + return nil +} + +// UpstreamHttpProtocolOptionsMultiError is an error wrapping multiple +// validation errors returned by UpstreamHttpProtocolOptions.ValidateAll() if +// the designated constraints aren't met. 
+type UpstreamHttpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamHttpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamHttpProtocolOptionsMultiError) AllErrors() []error { return m } + +// UpstreamHttpProtocolOptionsValidationError is the validation error returned +// by UpstreamHttpProtocolOptions.Validate if the designated constraints +// aren't met. +type UpstreamHttpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamHttpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamHttpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamHttpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamHttpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamHttpProtocolOptionsValidationError) ErrorName() string { + return "UpstreamHttpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamHttpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamHttpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamHttpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamHttpProtocolOptionsValidationError{} + +var _UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$") + +// Validate checks the field values on AlternateProtocolsCacheOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AlternateProtocolsCacheOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AlternateProtocolsCacheOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// AlternateProtocolsCacheOptionsMultiError, or nil if none found. 
+func (m *AlternateProtocolsCacheOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *AlternateProtocolsCacheOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := AlternateProtocolsCacheOptionsValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMaxEntries(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := AlternateProtocolsCacheOptionsValidationError{ + field: "MaxEntries", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetKeyValueStoreConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyValueStoreConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetPrepopulatedEntries() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return AlternateProtocolsCacheOptionsMultiError(errors) + } + + return nil +} + +// AlternateProtocolsCacheOptionsMultiError is an error wrapping multiple +// validation errors returned by AlternateProtocolsCacheOptions.ValidateAll() +// if the designated constraints aren't met. +type AlternateProtocolsCacheOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AlternateProtocolsCacheOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m AlternateProtocolsCacheOptionsMultiError) AllErrors() []error { return m } + +// AlternateProtocolsCacheOptionsValidationError is the validation error +// returned by AlternateProtocolsCacheOptions.Validate if the designated +// constraints aren't met. +type AlternateProtocolsCacheOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AlternateProtocolsCacheOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AlternateProtocolsCacheOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AlternateProtocolsCacheOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AlternateProtocolsCacheOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AlternateProtocolsCacheOptionsValidationError) ErrorName() string { + return "AlternateProtocolsCacheOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e AlternateProtocolsCacheOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAlternateProtocolsCacheOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AlternateProtocolsCacheOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AlternateProtocolsCacheOptionsValidationError{} + +// Validate checks the field values on HttpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpProtocolOptionsMultiError, or nil if none found. 
+func (m *HttpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxConnectionDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnectionDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxHeadersCount(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := HttpProtocolOptionsValidationError{ + field: "MaxHeadersCount", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HeadersWithUnderscoresAction + + if all { + switch v := interface{}(m.GetMaxRequestsPerConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequestsPerConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpProtocolOptionsMultiError(errors) + } + + return nil +} + +// HttpProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by HttpProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type HttpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpProtocolOptionsMultiError) AllErrors() []error { return m } + +// HttpProtocolOptionsValidationError is the validation error returned by +// HttpProtocolOptions.Validate if the designated constraints aren't met. +type HttpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpProtocolOptionsValidationError) ErrorName() string { + return "HttpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpProtocolOptionsValidationError{} + +// Validate checks the field values on Http1ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http1ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http1ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http1ProtocolOptionsMultiError, or nil if none found. 
+func (m *Http1ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAllowAbsoluteUrl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowAbsoluteUrl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AcceptHttp_10 + + // no validation rules for DefaultHostForHttp_10 + + if all { + switch v := interface{}(m.GetHeaderKeyFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderKeyFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableTrailers + + // no validation rules for AllowChunkedLength + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SendFullyQualifiedUrl + + if all { + switch v := interface{}(m.GetUseBalsaParser()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + 
field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseBalsaParser()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowCustomMethods + + if len(errors) > 0 { + return Http1ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http1ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http1ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http1ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptionsValidationError is the validation error returned by +// Http1ProtocolOptions.Validate if the designated constraints aren't met. +type Http1ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http1ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http1ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http1ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptionsValidationError) ErrorName() string { + return "Http1ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptionsValidationError{} + +// Validate checks the field values on KeepaliveSettings with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *KeepaliveSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeepaliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KeepaliveSettingsMultiError, or nil if none found. 
+func (m *KeepaliveSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *KeepaliveSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "Interval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetTimeout() == nil { + err := KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetIntervalJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetConnectionIdleInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "ConnectionIdleInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "ConnectionIdleInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return KeepaliveSettingsMultiError(errors) + } + + return nil +} + +// KeepaliveSettingsMultiError is an error wrapping multiple validation errors +// returned by KeepaliveSettings.ValidateAll() if the designated constraints +// aren't met. +type KeepaliveSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m KeepaliveSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeepaliveSettingsMultiError) AllErrors() []error { return m } + +// KeepaliveSettingsValidationError is the validation error returned by +// KeepaliveSettings.Validate if the designated constraints aren't met. +type KeepaliveSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeepaliveSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeepaliveSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeepaliveSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeepaliveSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeepaliveSettingsValidationError) ErrorName() string { + return "KeepaliveSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e KeepaliveSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeepaliveSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeepaliveSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeepaliveSettingsValidationError{} + +// Validate checks the field values on Http2ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http2ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http2ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http2ProtocolOptionsMultiError, or nil if none found. 
+func (m *Http2ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http2ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHpackTableSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHpackTableSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxConcurrentStreams", + reason: "value must be inside range [1, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "InitialStreamWindowSize", + reason: "value must be inside range [65535, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "InitialConnectionWindowSize", + reason: "value must be inside range [65535, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for AllowConnect + + // no validation rules for AllowMetadata + + if wrapper := m.GetMaxOutboundFrames(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxOutboundFrames", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetMaxOutboundControlFrames(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxOutboundControlFrames", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxInboundWindowUpdateFramesPerDataFrameSent(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxInboundWindowUpdateFramesPerDataFrameSent", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for StreamErrorOnInvalidHttpMessaging + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomSettingsParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := 
interface{}(m.GetConnectionKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseOghttp2Codec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseOghttp2Codec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Http2ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http2ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http2ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http2ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http2ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http2ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http2ProtocolOptionsValidationError is the validation error returned by +// Http2ProtocolOptions.Validate if the designated constraints aren't met. +type Http2ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http2ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http2ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http2ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http2ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Http2ProtocolOptionsValidationError) ErrorName() string { + return "Http2ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http2ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp2ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http2ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http2ProtocolOptionsValidationError{} + +// Validate checks the field values on GrpcProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcProtocolOptionsMultiError, or nil if none found. +func (m *GrpcProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcProtocolOptionsMultiError(errors) + } + + return nil +} + +// GrpcProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by GrpcProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type GrpcProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcProtocolOptionsMultiError) AllErrors() []error { return m } + +// GrpcProtocolOptionsValidationError is the validation error returned by +// GrpcProtocolOptions.Validate if the designated constraints aren't met. +type GrpcProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e GrpcProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcProtocolOptionsValidationError) ErrorName() string { + return "GrpcProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcProtocolOptionsValidationError{} + +// Validate checks the field values on Http3ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http3ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http3ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http3ProtocolOptionsMultiError, or nil if none found. +func (m *Http3ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http3ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetQuicProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http3ProtocolOptionsValidationError{ + field: 
"OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowExtendedConnect + + // no validation rules for AllowMetadata + + if len(errors) > 0 { + return Http3ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http3ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http3ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http3ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http3ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http3ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http3ProtocolOptionsValidationError is the validation error returned by +// Http3ProtocolOptions.Validate if the designated constraints aren't met. +type Http3ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http3ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http3ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http3ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http3ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http3ProtocolOptionsValidationError) ErrorName() string { + return "Http3ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http3ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp3ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http3ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http3ProtocolOptionsValidationError{} + +// Validate checks the field values on SchemeHeaderTransformation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SchemeHeaderTransformation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SchemeHeaderTransformation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SchemeHeaderTransformationMultiError, or nil if none found. 
+func (m *SchemeHeaderTransformation) ValidateAll() error { + return m.validate(true) +} + +func (m *SchemeHeaderTransformation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MatchUpstream + + switch v := m.Transformation.(type) { + case *SchemeHeaderTransformation_SchemeToOverwrite: + if v == nil { + err := SchemeHeaderTransformationValidationError{ + field: "Transformation", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := _SchemeHeaderTransformation_SchemeToOverwrite_InLookup[m.GetSchemeToOverwrite()]; !ok { + err := SchemeHeaderTransformationValidationError{ + field: "SchemeToOverwrite", + reason: "value must be in list [http https]", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SchemeHeaderTransformationMultiError(errors) + } + + return nil +} + +// SchemeHeaderTransformationMultiError is an error wrapping multiple +// validation errors returned by SchemeHeaderTransformation.ValidateAll() if +// the designated constraints aren't met. +type SchemeHeaderTransformationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SchemeHeaderTransformationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SchemeHeaderTransformationMultiError) AllErrors() []error { return m } + +// SchemeHeaderTransformationValidationError is the validation error returned +// by SchemeHeaderTransformation.Validate if the designated constraints aren't met. +type SchemeHeaderTransformationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SchemeHeaderTransformationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SchemeHeaderTransformationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SchemeHeaderTransformationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SchemeHeaderTransformationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SchemeHeaderTransformationValidationError) ErrorName() string { + return "SchemeHeaderTransformationValidationError" +} + +// Error satisfies the builtin error interface +func (e SchemeHeaderTransformationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSchemeHeaderTransformation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SchemeHeaderTransformationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SchemeHeaderTransformationValidationError{} + +var _SchemeHeaderTransformation_SchemeToOverwrite_InLookup = map[string]struct{}{ + "http": {}, + "https": {}, +} + +// Validate checks the field values on +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError, or +// nil if none found. +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHostname() != "" { + + if !_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern.MatchString(m.GetHostname()) { + err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{ + field: "Hostname", + reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if val := m.GetPort(); val <= 0 || val >= 65535 { + err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{ + field: "Port", + reason: "value must be inside range (0, 65535)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError(errors) + } + + return nil +} + +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError is an +// error wrapping multiple validation errors returned by +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ValidateAll() +// if the designated constraints aren't met. +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) AllErrors() []error { + return m +} + +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError +// is the validation error returned by +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.Validate if the +// designated constraints aren't met. +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. 
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) ErrorName() string { + return "AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{} + +var _AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$") + +// Validate checks the field values on Http1ProtocolOptions_HeaderKeyFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Http1ProtocolOptions_HeaderKeyFormat) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http1ProtocolOptions_HeaderKeyFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// Http1ProtocolOptions_HeaderKeyFormatMultiError, or nil if none found. 
+func (m *Http1ProtocolOptions_HeaderKeyFormat) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofHeaderFormatPresent := false + switch v := m.HeaderFormat.(type) { + case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true + + if all { + switch v := interface{}(m.GetProperCaseWords()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProperCaseWords()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true + + if all { + switch v := interface{}(m.GetStatefulFormatter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatefulFormatter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofHeaderFormatPresent { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Http1ProtocolOptions_HeaderKeyFormatMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptions_HeaderKeyFormatMultiError is an error wrapping multiple +// validation errors returned by +// Http1ProtocolOptions_HeaderKeyFormat.ValidateAll() if the designated +// constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormatMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptions_HeaderKeyFormatValidationError is the validation error +// returned by Http1ProtocolOptions_HeaderKeyFormat.Validate if the designated +// constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormatValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) ErrorName() string { + return "Http1ProtocolOptions_HeaderKeyFormatValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions_HeaderKeyFormat.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptions_HeaderKeyFormatValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptions_HeaderKeyFormatValidationError{} + +// Validate checks the field values on +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError, or nil if +// none found. +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError is an error +// wrapping multiple validation errors returned by +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ValidateAll() if the +// designated constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError is the +// validation error returned by +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.Validate if the +// designated constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) ErrorName() string { + return "Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} + +// Validate checks the field values on Http2ProtocolOptions_SettingsParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Http2ProtocolOptions_SettingsParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Http2ProtocolOptions_SettingsParameter with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Http2ProtocolOptions_SettingsParameterMultiError, or nil if none found. 
+func (m *Http2ProtocolOptions_SettingsParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *Http2ProtocolOptions_SettingsParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetIdentifier(); wrapper != nil { + + if val := wrapper.GetValue(); val < 0 || val > 65535 { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Identifier", + reason: "value must be inside range [0, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } else { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Identifier", + reason: "value is required and must not be nil.", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetValue() == nil { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Http2ProtocolOptions_SettingsParameterMultiError(errors) + } + + return nil +} + +// Http2ProtocolOptions_SettingsParameterMultiError is an error wrapping +// multiple validation errors returned by +// Http2ProtocolOptions_SettingsParameter.ValidateAll() if the designated +// constraints aren't met. +type Http2ProtocolOptions_SettingsParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http2ProtocolOptions_SettingsParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http2ProtocolOptions_SettingsParameterMultiError) AllErrors() []error { return m } + +// Http2ProtocolOptions_SettingsParameterValidationError is the validation +// error returned by Http2ProtocolOptions_SettingsParameter.Validate if the +// designated constraints aren't met. +type Http2ProtocolOptions_SettingsParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http2ProtocolOptions_SettingsParameterValidationError) ErrorName() string { + return "Http2ProtocolOptions_SettingsParameterValidationError" +} + +// Error satisfies the builtin error interface +func (e Http2ProtocolOptions_SettingsParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp2ProtocolOptions_SettingsParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http2ProtocolOptions_SettingsParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http2ProtocolOptions_SettingsParameterValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go new file mode 100644 index 000000000..131aa9d32 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go @@ -0,0 +1,1718 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TcpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *QuicKeepAliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicKeepAliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicKeepAliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InitialInterval != nil { + size, err := (*durationpb.Duration)(m.InitialInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IdleNetworkTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleNetworkTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.ClientConnectionOptions) > 0 { + i -= len(m.ClientConnectionOptions) + copy(dAtA[i:], m.ClientConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientConnectionOptions))) + i-- + dAtA[i] = 0x3a + } + if len(m.ConnectionOptions) > 0 { + i -= len(m.ConnectionOptions) + copy(dAtA[i:], m.ConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionOptions))) + i-- + dAtA[i] = 0x32 + } + if m.ConnectionKeepalive != nil { + size, err := 
m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.NumTimeoutsToTriggerPortMigration != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialConnectionWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamHttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.OverrideAutoSniHeader) > 0 { + i -= len(m.OverrideAutoSniHeader) + copy(dAtA[i:], m.OverrideAutoSniHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideAutoSniHeader))) + i-- + dAtA[i] = 0x1a + } + if m.AutoSanValidation { + i-- + if m.AutoSanValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AutoSni { + i-- + if m.AutoSni { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Port != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.Hostname) > 0 { + i 
-= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CanonicalSuffixes) > 0 { + for iNdEx := len(m.CanonicalSuffixes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CanonicalSuffixes[iNdEx]) + copy(dAtA[i:], m.CanonicalSuffixes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CanonicalSuffixes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.PrepopulatedEntries) > 0 { + for iNdEx := len(m.PrepopulatedEntries) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PrepopulatedEntries[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.KeyValueStoreConfig != nil { + size, err := m.KeyValueStoreConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxEntries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEntries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HeadersWithUnderscoresAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeadersWithUnderscoresAction)) + i-- + dAtA[i] = 0x28 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.MaxConnectionDuration != nil { + size, err := (*durationpb.Duration)(m.MaxConnectionDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxHeadersCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxHeadersCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProperCaseWords != nil { + size, err := m.ProperCaseWords.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatefulFormatter != nil { + size, err := m.StatefulFormatter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowCustomMethods { + i-- + if m.AllowCustomMethods { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.UseBalsaParser != nil { + size, err := (*wrapperspb.BoolValue)(m.UseBalsaParser).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SendFullyQualifiedUrl { + i-- + if m.SendFullyQualifiedUrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowChunkedLength { + i-- + if m.AllowChunkedLength { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EnableTrailers { + i-- + if m.EnableTrailers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.HeaderKeyFormat != nil { + size, err := m.HeaderKeyFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.DefaultHostForHttp_10) > 0 { + i -= len(m.DefaultHostForHttp_10) + copy(dAtA[i:], m.DefaultHostForHttp_10) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultHostForHttp_10))) + i-- + dAtA[i] = 0x1a + } + if m.AcceptHttp_10 { + i-- + if m.AcceptHttp_10 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AllowAbsoluteUrl != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeepaliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeepaliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeepaliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionIdleInterval != nil { + size, err := (*durationpb.Duration)(m.ConnectionIdleInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + if vtmsg, ok := interface{}(m.IntervalJitter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.IntervalJitter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := (*wrapperspb.UInt32Value)(m.Value).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Identifier != nil { + size, err := (*wrapperspb.UInt32Value)(m.Identifier).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseOghttp2Codec != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOghttp2Codec).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ConnectionKeepalive != nil { + size, err := m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.CustomSettingsParameters) > 0 { + for iNdEx := len(m.CustomSettingsParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CustomSettingsParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.StreamErrorOnInvalidHttpMessaging { + i-- + if m.StreamErrorOnInvalidHttpMessaging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MaxInboundPriorityFramesPerStream != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.MaxOutboundControlFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxOutboundFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowConnect { + i-- + if m.AllowConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.InitialConnectionWindowSize != nil { + size, err := 
(*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HpackTableSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.HpackTableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http2ProtocolOptions != nil { + size, err := m.Http2ProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http3ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http3ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http3ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowExtendedConnect { + i-- + if m.AllowExtendedConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + size, err := m.QuicProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemeHeaderTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchUpstream { + i-- + if m.MatchUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.Transformation.(*SchemeHeaderTransformation_SchemeToOverwrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeToOverwrite) + copy(dAtA[i:], m.SchemeToOverwrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeToOverwrite))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TcpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *QuicKeepAliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialInterval != nil { + l = (*durationpb.Duration)(m.InitialInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumTimeoutsToTriggerPortMigration != nil { + l = (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClientConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleNetworkTimeout != nil { + l = (*durationpb.Duration)(m.IdleNetworkTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*UpstreamHttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoSni { + n += 2 + } + if m.AutoSanValidation { + n += 2 + } + l = len(m.OverrideAutoSniHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Port)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEntries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEntries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyValueStoreConfig != nil { + l = m.KeyValueStoreConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PrepopulatedEntries) > 0 { + for _, e := range m.PrepopulatedEntries { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CanonicalSuffixes) > 0 { + for _, s := range m.CanonicalSuffixes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxHeadersCount != nil { + l = (*wrapperspb.UInt32Value)(m.MaxHeadersCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConnectionDuration != nil { + l = (*durationpb.Duration)(m.MaxConnectionDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeadersWithUnderscoresAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeadersWithUnderscoresAction)) + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HeaderFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProperCaseWords != nil { + l = m.ProperCaseWords.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatefulFormatter != nil { + l = m.StatefulFormatter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*Http1ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowAbsoluteUrl != nil { + l = (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AcceptHttp_10 { + n += 2 + } + l = len(m.DefaultHostForHttp_10) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeaderKeyFormat != nil { + l = m.HeaderKeyFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableTrailers { + n += 2 + } + if m.AllowChunkedLength { + n += 2 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendFullyQualifiedUrl { + n += 2 + } + if m.UseBalsaParser != nil { + l = (*wrapperspb.BoolValue)(m.UseBalsaParser).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCustomMethods { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *KeepaliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + if size, ok := interface{}(m.IntervalJitter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.IntervalJitter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdleInterval != nil { + l = (*durationpb.Duration)(m.ConnectionIdleInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions_SettingsParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Identifier != nil { + l = (*wrapperspb.UInt32Value)(m.Identifier).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Value != nil { + l = (*wrapperspb.UInt32Value)(m.Value).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HpackTableSize != nil { + l = (*wrapperspb.UInt32Value)(m.HpackTableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + if m.MaxOutboundFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxOutboundControlFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + l = 
(*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundPriorityFramesPerStream != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StreamErrorOnInvalidHttpMessaging { + n += 2 + } + if len(m.CustomSettingsParameters) > 0 { + for _, e := range m.CustomSettingsParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseOghttp2Codec != nil { + l = (*wrapperspb.BoolValue)(m.UseOghttp2Codec).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http2ProtocolOptions != nil { + l = m.Http2ProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http3ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + l = m.QuicProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExtendedConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Transformation.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MatchUpstream { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeToOverwrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go new file mode 100644 index 000000000..43e7d7706 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ProxyProtocolPassThroughTLVs_PassTLVsMatchType int32 + +const ( + // Pass all TLVs. + ProxyProtocolPassThroughTLVs_INCLUDE_ALL ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 0 + // Pass specific TLVs defined in tlv_type. + ProxyProtocolPassThroughTLVs_INCLUDE ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 1 +) + +// Enum value maps for ProxyProtocolPassThroughTLVs_PassTLVsMatchType. +var ( + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_name = map[int32]string{ + 0: "INCLUDE_ALL", + 1: "INCLUDE", + } + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_value = map[string]int32{ + "INCLUDE_ALL": 0, + "INCLUDE": 1, + } +) + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Enum() *ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + p := new(ProxyProtocolPassThroughTLVs_PassTLVsMatchType) + *p = x + return p +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor() +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0] +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs_PassTLVsMatchType.Descriptor instead. +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0} +} + +type ProxyProtocolConfig_Version int32 + +const ( + // PROXY protocol version 1. Human readable format. + ProxyProtocolConfig_V1 ProxyProtocolConfig_Version = 0 + // PROXY protocol version 2. Binary format. + ProxyProtocolConfig_V2 ProxyProtocolConfig_Version = 1 +) + +// Enum value maps for ProxyProtocolConfig_Version. +var ( + ProxyProtocolConfig_Version_name = map[int32]string{ + 0: "V1", + 1: "V2", + } + ProxyProtocolConfig_Version_value = map[string]int32{ + "V1": 0, + "V2": 1, + } +) + +func (x ProxyProtocolConfig_Version) Enum() *ProxyProtocolConfig_Version { + p := new(ProxyProtocolConfig_Version) + *p = x + return p +} + +func (x ProxyProtocolConfig_Version) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyProtocolConfig_Version) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1].Descriptor() +} + +func (ProxyProtocolConfig_Version) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1] +} + +func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyProtocolConfig_Version.Descriptor instead. +func (ProxyProtocolConfig_Version) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1, 0} +} + +type ProxyProtocolPassThroughTLVs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The strategy to pass through TLVs. 
Default is INCLUDE_ALL. + // If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + MatchType ProxyProtocolPassThroughTLVs_PassTLVsMatchType `protobuf:"varint,1,opt,name=match_type,json=matchType,proto3,enum=envoy.config.core.v3.ProxyProtocolPassThroughTLVs_PassTLVsMatchType" json:"match_type,omitempty"` + // The TLV types that are applied based on match_type. + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + TlvType []uint32 `protobuf:"varint,2,rep,packed,name=tlv_type,json=tlvType,proto3" json:"tlv_type,omitempty"` +} + +func (x *ProxyProtocolPassThroughTLVs) Reset() { + *x = ProxyProtocolPassThroughTLVs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyProtocolPassThroughTLVs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyProtocolPassThroughTLVs) ProtoMessage() {} + +func (x *ProxyProtocolPassThroughTLVs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs.ProtoReflect.Descriptor instead. +func (*ProxyProtocolPassThroughTLVs) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0} +} + +func (x *ProxyProtocolPassThroughTLVs) GetMatchType() ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + if x != nil { + return x.MatchType + } + return ProxyProtocolPassThroughTLVs_INCLUDE_ALL +} + +func (x *ProxyProtocolPassThroughTLVs) GetTlvType() []uint32 { + if x != nil { + return x.TlvType + } + return nil +} + +type ProxyProtocolConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version ProxyProtocolConfig_Version `protobuf:"varint,1,opt,name=version,proto3,enum=envoy.config.core.v3.ProxyProtocolConfig_Version" json:"version,omitempty"` + // This config controls which TLVs can be passed to upstream if it is Proxy Protocol + // V2 header. If there is no setting for this field, no TLVs will be passed through. + PassThroughTlvs *ProxyProtocolPassThroughTLVs `protobuf:"bytes,2,opt,name=pass_through_tlvs,json=passThroughTlvs,proto3" json:"pass_through_tlvs,omitempty"` +} + +func (x *ProxyProtocolConfig) Reset() { + *x = ProxyProtocolConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyProtocolConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyProtocolConfig) ProtoMessage() {} + +func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyProtocolConfig.ProtoReflect.Descriptor instead. 
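+
+// Illustrative sketch, not part of the generated file: one way a caller might
+// assemble a ProxyProtocolConfig from the message types defined above, sending
+// PROXY protocol v2 and passing through only TLV type 0x05 (assumed here to be
+// PP2_TYPE_UNIQUE_ID per the HAProxy spec). The surrounding wiring into a
+// listener or transport socket is out of scope for this sketch.
+func exampleProxyProtocolConfig() *ProxyProtocolConfig {
+	return &ProxyProtocolConfig{
+		// Use the binary v2 header so TLVs can be carried at all.
+		Version: ProxyProtocolConfig_V2,
+		PassThroughTlvs: &ProxyProtocolPassThroughTLVs{
+			// INCLUDE limits pass-through to the listed tlv_type values;
+			// INCLUDE_ALL would forward every TLV regardless of tlv_type.
+			MatchType: ProxyProtocolPassThroughTLVs_INCLUDE,
+			TlvType:   []uint32{0x05},
+		},
+	}
+}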
+func (*ProxyProtocolConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1} +} + +func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version { + if x != nil { + return x.Version + } + return ProxyProtocolConfig_V1 +} + +func (x *ProxyProtocolConfig) GetPassThroughTlvs() *ProxyProtocolPassThroughTLVs { + if x != nil { + return x.PassThroughTlvs + } + return nil +} + +var File_envoy_config_core_v3_proxy_protocol_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x1c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, + 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, 0x12, 0x63, 0x0a, 0x0a, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, + 0x56, 0x73, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x28, 0x0a, 0x08, 0x74, 0x6c, 0x76, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0d, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x2a, 0x03, 0x10, 0x80, 0x02, + 0x52, 0x07, 0x74, 0x6c, 0x76, 0x54, 0x79, 0x70, 0x65, 0x22, 0x31, 0x0a, 0x11, 0x50, 0x61, 0x73, + 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x10, 0x01, 0x22, 0xdd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, + 0x68, 0x5f, 0x74, 0x6c, 0x76, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, + 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, + 0x52, 0x0f, 0x70, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x6c, 0x76, + 0x73, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, + 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42, 0x86, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = file_envoy_config_core_v3_proxy_protocol_proto_rawDesc +) + +func file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_proxy_protocol_proto_rawDescData) + }) + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescData +} + +var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_proxy_protocol_proto_goTypes = []interface{}{ + (ProxyProtocolPassThroughTLVs_PassTLVsMatchType)(0), // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + (ProxyProtocolConfig_Version)(0), // 1: envoy.config.core.v3.ProxyProtocolConfig.Version + (*ProxyProtocolPassThroughTLVs)(nil), // 2: envoy.config.core.v3.ProxyProtocolPassThroughTLVs + (*ProxyProtocolConfig)(nil), // 3: envoy.config.core.v3.ProxyProtocolConfig +} +var file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.match_type:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + 1, // 1: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version + 2, // 2: envoy.config.core.v3.ProxyProtocolConfig.pass_through_tlvs:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_proxy_protocol_proto_init() } +func 
file_envoy_config_core_v3_proxy_protocol_proto_init() { + if File_envoy_config_core_v3_proxy_protocol_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyProtocolPassThroughTLVs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyProtocolConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_proxy_protocol_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_proxy_protocol_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_proxy_protocol_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_proxy_protocol_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_proxy_protocol_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_proxy_protocol_proto = out.File + file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = nil + file_envoy_config_core_v3_proxy_protocol_proto_goTypes = nil + file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go new file mode 100644 index 000000000..66555a166 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go @@ -0,0 +1,291 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ProxyProtocolPassThroughTLVs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ProxyProtocolPassThroughTLVsMultiError, or nil if none found. 
+func (m *ProxyProtocolPassThroughTLVs) ValidateAll() error { + return m.validate(true) +} + +func (m *ProxyProtocolPassThroughTLVs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MatchType + + for idx, item := range m.GetTlvType() { + _, _ = idx, item + + if item >= 256 { + err := ProxyProtocolPassThroughTLVsValidationError{ + field: fmt.Sprintf("TlvType[%v]", idx), + reason: "value must be less than 256", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ProxyProtocolPassThroughTLVsMultiError(errors) + } + + return nil +} + +// ProxyProtocolPassThroughTLVsMultiError is an error wrapping multiple +// validation errors returned by ProxyProtocolPassThroughTLVs.ValidateAll() if +// the designated constraints aren't met. +type ProxyProtocolPassThroughTLVsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ProxyProtocolPassThroughTLVsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ProxyProtocolPassThroughTLVsMultiError) AllErrors() []error { return m } + +// ProxyProtocolPassThroughTLVsValidationError is the validation error returned +// by ProxyProtocolPassThroughTLVs.Validate if the designated constraints +// aren't met. +type ProxyProtocolPassThroughTLVsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ProxyProtocolPassThroughTLVsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ProxyProtocolPassThroughTLVsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ProxyProtocolPassThroughTLVsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ProxyProtocolPassThroughTLVsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ProxyProtocolPassThroughTLVsValidationError) ErrorName() string { + return "ProxyProtocolPassThroughTLVsValidationError" +} + +// Error satisfies the builtin error interface +func (e ProxyProtocolPassThroughTLVsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sProxyProtocolPassThroughTLVs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ProxyProtocolPassThroughTLVsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ProxyProtocolPassThroughTLVsValidationError{} + +// Validate checks the field values on ProxyProtocolConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ProxyProtocolConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ProxyProtocolConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ProxyProtocolConfigMultiError, or nil if none found. 
+func (m *ProxyProtocolConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ProxyProtocolConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetPassThroughTlvs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPassThroughTlvs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ProxyProtocolConfigMultiError(errors) + } + + return nil +} + +// ProxyProtocolConfigMultiError is an error wrapping multiple validation +// errors returned by ProxyProtocolConfig.ValidateAll() if the designated +// constraints aren't met. +type ProxyProtocolConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ProxyProtocolConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ProxyProtocolConfigMultiError) AllErrors() []error { return m } + +// ProxyProtocolConfigValidationError is the validation error returned by +// ProxyProtocolConfig.Validate if the designated constraints aren't met. +type ProxyProtocolConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ProxyProtocolConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ProxyProtocolConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ProxyProtocolConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ProxyProtocolConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
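+
+// Illustrative sketch, not part of the generated file: Validate returns the
+// first violation it finds, while ValidateAll keeps going and wraps every
+// violation in the *MultiError type above. The TLV value 300 is an assumed
+// out-of-range input chosen to trip the "< 256" rule on tlv_type.
+func exampleValidateTlvs() []error {
+	msg := &ProxyProtocolPassThroughTLVs{
+		MatchType: ProxyProtocolPassThroughTLVs_INCLUDE,
+		TlvType:   []uint32{1, 300},
+	}
+	if err := msg.ValidateAll(); err != nil {
+		// On failure the concrete type is ProxyProtocolPassThroughTLVsMultiError,
+		// so every individual violation can be inspected.
+		return err.(ProxyProtocolPassThroughTLVsMultiError).AllErrors()
+	}
+	return nil
+}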
+func (e ProxyProtocolConfigValidationError) ErrorName() string { + return "ProxyProtocolConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ProxyProtocolConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sProxyProtocolConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ProxyProtocolConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ProxyProtocolConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go new file mode 100644 index 000000000..d429640ee --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go @@ -0,0 +1,162 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ProxyProtocolPassThroughTLVs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TlvType) > 0 { + var pksize2 int + for _, num := range m.TlvType { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.TlvType { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MatchType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MatchType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PassThroughTlvs != nil { + size, err := m.PassThroughTlvs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolPassThroughTLVs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MatchType)) + } + if len(m.TlvType) > 0 { + l = 0 + for _, e := range m.TlvType { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *ProxyProtocolConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Version)) + } + if m.PassThroughTlvs != nil { + l = m.PassThroughTlvs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go new file mode 100644 index 000000000..fc4ec52de --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. +type DnsResolverOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Use TCP for all DNS queries instead of the default protocol UDP. + UseTcpForDnsLookups bool `protobuf:"varint,1,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // Do not use the default search domains; only query hostnames as-is or as aliases. 
+ NoDefaultSearchDomain bool `protobuf:"varint,2,opt,name=no_default_search_domain,json=noDefaultSearchDomain,proto3" json:"no_default_search_domain,omitempty"` +} + +func (x *DnsResolverOptions) Reset() { + *x = DnsResolverOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DnsResolverOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DnsResolverOptions) ProtoMessage() {} + +func (x *DnsResolverOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DnsResolverOptions.ProtoReflect.Descriptor instead. +func (*DnsResolverOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{0} +} + +func (x *DnsResolverOptions) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +func (x *DnsResolverOptions) GetNoDefaultSearchDomain() bool { + if x != nil { + return x.NoDefaultSearchDomain + } + return false +} + +// DNS resolution configuration which includes the underlying dns resolver addresses and options. +type DnsResolutionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of dns resolver addresses. If specified, the DNS client library will perform resolution + // via the underlying DNS resolvers. Otherwise, the default system resolvers + // (e.g., /etc/resolv.conf) will be used. + Resolvers []*Address `protobuf:"bytes,1,rep,name=resolvers,proto3" json:"resolvers,omitempty"` + // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + DnsResolverOptions *DnsResolverOptions `protobuf:"bytes,2,opt,name=dns_resolver_options,json=dnsResolverOptions,proto3" json:"dns_resolver_options,omitempty"` +} + +func (x *DnsResolutionConfig) Reset() { + *x = DnsResolutionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DnsResolutionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DnsResolutionConfig) ProtoMessage() {} + +func (x *DnsResolutionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DnsResolutionConfig.ProtoReflect.Descriptor instead. 
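+
+// Illustrative sketch, not part of the generated file: a DnsResolutionConfig
+// that points resolution at one explicit nameserver and forces DNS over TCP.
+// The Address/SocketAddress construction assumes the field names from
+// address.pb.go in this same package; 8.8.8.8:53 is just a placeholder resolver.
+func exampleDnsResolutionConfig() *DnsResolutionConfig {
+	return &DnsResolutionConfig{
+		// At least one resolver is required by the validation rules in
+		// resolver.pb.validate.go.
+		Resolvers: []*Address{{
+			Address: &Address_SocketAddress{
+				SocketAddress: &SocketAddress{
+					Address:       "8.8.8.8",
+					PortSpecifier: &SocketAddress_PortValue{PortValue: 53},
+				},
+			},
+		}},
+		DnsResolverOptions: &DnsResolverOptions{
+			UseTcpForDnsLookups:   true,
+			NoDefaultSearchDomain: true,
+		},
+	}
+}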
+func (*DnsResolutionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{1} +} + +func (x *DnsResolutionConfig) GetResolvers() []*Address { + if x != nil { + return x.Resolvers + } + return nil +} + +func (x *DnsResolutionConfig) GetDnsResolverOptions() *DnsResolverOptions { + if x != nil { + return x.DnsResolverOptions + } + return nil +} + +var File_envoy_config_core_v3_resolver_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_resolver_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, + 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, + 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xb8, 0x01, + 0x0a, 0x13, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, + 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_resolver_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_resolver_proto_rawDescData = file_envoy_config_core_v3_resolver_proto_rawDesc +) + +func file_envoy_config_core_v3_resolver_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_resolver_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_resolver_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_resolver_proto_rawDescData) + }) + return file_envoy_config_core_v3_resolver_proto_rawDescData +} + +var file_envoy_config_core_v3_resolver_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_resolver_proto_goTypes = []interface{}{ + (*DnsResolverOptions)(nil), // 0: envoy.config.core.v3.DnsResolverOptions + (*DnsResolutionConfig)(nil), // 1: envoy.config.core.v3.DnsResolutionConfig + (*Address)(nil), // 2: envoy.config.core.v3.Address +} +var file_envoy_config_core_v3_resolver_proto_depIdxs = []int32{ + 2, // 0: envoy.config.core.v3.DnsResolutionConfig.resolvers:type_name -> envoy.config.core.v3.Address + 0, // 1: envoy.config.core.v3.DnsResolutionConfig.dns_resolver_options:type_name -> envoy.config.core.v3.DnsResolverOptions + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_resolver_proto_init() } +func file_envoy_config_core_v3_resolver_proto_init() { + if File_envoy_config_core_v3_resolver_proto != nil { + return + } + file_envoy_config_core_v3_address_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_resolver_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DnsResolverOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_resolver_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DnsResolutionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_resolver_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_resolver_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_resolver_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_resolver_proto_msgTypes, + }.Build() + 
File_envoy_config_core_v3_resolver_proto = out.File + file_envoy_config_core_v3_resolver_proto_rawDesc = nil + file_envoy_config_core_v3_resolver_proto_goTypes = nil + file_envoy_config_core_v3_resolver_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go new file mode 100644 index 000000000..e9f661f67 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go @@ -0,0 +1,319 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DnsResolverOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DnsResolverOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DnsResolverOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DnsResolverOptionsMultiError, or nil if none found. +func (m *DnsResolverOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *DnsResolverOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseTcpForDnsLookups + + // no validation rules for NoDefaultSearchDomain + + if len(errors) > 0 { + return DnsResolverOptionsMultiError(errors) + } + + return nil +} + +// DnsResolverOptionsMultiError is an error wrapping multiple validation errors +// returned by DnsResolverOptions.ValidateAll() if the designated constraints +// aren't met. +type DnsResolverOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DnsResolverOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DnsResolverOptionsMultiError) AllErrors() []error { return m } + +// DnsResolverOptionsValidationError is the validation error returned by +// DnsResolverOptions.Validate if the designated constraints aren't met. +type DnsResolverOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DnsResolverOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DnsResolverOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DnsResolverOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
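+
+// Illustrative sketch, not part of the generated file: an empty Resolvers list
+// violates the "at least 1 item" rule enforced below, so Validate returns a
+// DnsResolutionConfigValidationError describing the Resolvers field.
+func exampleValidateEmptyResolvers() error {
+	cfg := &DnsResolutionConfig{
+		Resolvers: nil, // assumed-invalid input, used only for demonstration
+	}
+	return cfg.Validate()
+}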
+func (e DnsResolverOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DnsResolverOptionsValidationError) ErrorName() string { + return "DnsResolverOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e DnsResolverOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDnsResolverOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DnsResolverOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DnsResolverOptionsValidationError{} + +// Validate checks the field values on DnsResolutionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DnsResolutionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DnsResolutionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DnsResolutionConfigMultiError, or nil if none found. +func (m *DnsResolutionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DnsResolutionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetResolvers()) < 1 { + err := DnsResolutionConfigValidationError{ + field: "Resolvers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResolvers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDnsResolverOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolverOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: 
err, + } + } + } + + if len(errors) > 0 { + return DnsResolutionConfigMultiError(errors) + } + + return nil +} + +// DnsResolutionConfigMultiError is an error wrapping multiple validation +// errors returned by DnsResolutionConfig.ValidateAll() if the designated +// constraints aren't met. +type DnsResolutionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DnsResolutionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DnsResolutionConfigMultiError) AllErrors() []error { return m } + +// DnsResolutionConfigValidationError is the validation error returned by +// DnsResolutionConfig.Validate if the designated constraints aren't met. +type DnsResolutionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DnsResolutionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DnsResolutionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DnsResolutionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DnsResolutionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DnsResolutionConfigValidationError) ErrorName() string { + return "DnsResolutionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e DnsResolutionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDnsResolutionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DnsResolutionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DnsResolutionConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go new file mode 100644 index 000000000..5ae614bb2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go @@ -0,0 +1,163 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DnsResolverOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolverOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolverOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NoDefaultSearchDomain { + i-- + if m.NoDefaultSearchDomain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DnsResolutionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolutionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolutionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DnsResolverOptions != nil { + size, err := m.DnsResolverOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Resolvers) > 0 { + for iNdEx := len(m.Resolvers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resolvers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DnsResolverOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseTcpForDnsLookups { + n += 2 + } + if m.NoDefaultSearchDomain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DnsResolutionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resolvers) > 0 { + for _, e := range m.Resolvers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsResolverOptions != nil { + l = m.DnsResolverOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go new file mode 100644 index 000000000..2b684f57b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go @@ -0,0 +1,615 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SocketOption_SocketState int32 + +const ( + // Socket options are applied after socket creation but before binding the socket to a port + SocketOption_STATE_PREBIND SocketOption_SocketState = 0 + // Socket options are applied after binding the socket to a port but before calling listen() + SocketOption_STATE_BOUND SocketOption_SocketState = 1 + // Socket options are applied after calling listen() + SocketOption_STATE_LISTENING SocketOption_SocketState = 2 +) + +// Enum value maps for SocketOption_SocketState. +var ( + SocketOption_SocketState_name = map[int32]string{ + 0: "STATE_PREBIND", + 1: "STATE_BOUND", + 2: "STATE_LISTENING", + } + SocketOption_SocketState_value = map[string]int32{ + "STATE_PREBIND": 0, + "STATE_BOUND": 1, + "STATE_LISTENING": 2, + } +) + +func (x SocketOption_SocketState) Enum() *SocketOption_SocketState { + p := new(SocketOption_SocketState) + *p = x + return p +} + +func (x SocketOption_SocketState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SocketOption_SocketState) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_socket_option_proto_enumTypes[0].Descriptor() +} + +func (SocketOption_SocketState) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_socket_option_proto_enumTypes[0] +} + +func (x SocketOption_SocketState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SocketOption_SocketState.Descriptor instead. +func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0} +} + +// Generic socket option message. This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +// +// For example: +// +// .. code-block:: json +// +// { +// "description": "support tcp keep alive", +// "state": 0, +// "level": 1, +// "name": 9, +// "int_value": 1, +// } +// +// 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux. +// With the above configuration, `TCP Keep-Alives `_ +// can be enabled in socket with Linux, which can be used in +// :ref:`listener's` or +// :ref:`admin's ` socket_options etc. +// +// It should be noted that the name or level may have different values on different platforms. +// [#next-free-field: 8] +type SocketOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. 
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + // The numeric name as passed to setsockopt + Name int64 `protobuf:"varint,3,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to Value: + // + // *SocketOption_IntValue + // *SocketOption_BufValue + Value isSocketOption_Value `protobuf_oneof:"value"` + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. + State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.config.core.v3.SocketOption_SocketState" json:"state,omitempty"` + // Apply the socket option to the specified `socket type `_. + // If not specified, the socket option will be applied to all socket types. + Type *SocketOption_SocketType `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *SocketOption) Reset() { + *x = SocketOption{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption) ProtoMessage() {} + +func (x *SocketOption) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption.ProtoReflect.Descriptor instead. +func (*SocketOption) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0} +} + +func (x *SocketOption) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SocketOption) GetLevel() int64 { + if x != nil { + return x.Level + } + return 0 +} + +func (x *SocketOption) GetName() int64 { + if x != nil { + return x.Name + } + return 0 +} + +func (m *SocketOption) GetValue() isSocketOption_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *SocketOption) GetIntValue() int64 { + if x, ok := x.GetValue().(*SocketOption_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *SocketOption) GetBufValue() []byte { + if x, ok := x.GetValue().(*SocketOption_BufValue); ok { + return x.BufValue + } + return nil +} + +func (x *SocketOption) GetState() SocketOption_SocketState { + if x != nil { + return x.State + } + return SocketOption_STATE_PREBIND +} + +func (x *SocketOption) GetType() *SocketOption_SocketType { + if x != nil { + return x.Type + } + return nil +} + +type isSocketOption_Value interface { + isSocketOption_Value() +} + +type SocketOption_IntValue struct { + // Because many sockopts take an int value. + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type SocketOption_BufValue struct { + // Otherwise it's a byte buffer. 
+ BufValue []byte `protobuf:"bytes,5,opt,name=buf_value,json=bufValue,proto3,oneof"` +} + +func (*SocketOption_IntValue) isSocketOption_Value() {} + +func (*SocketOption_BufValue) isSocketOption_Value() {} + +type SocketOptionsOverride struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SocketOptions []*SocketOption `protobuf:"bytes,1,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *SocketOptionsOverride) Reset() { + *x = SocketOptionsOverride{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOptionsOverride) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOptionsOverride) ProtoMessage() {} + +func (x *SocketOptionsOverride) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOptionsOverride.ProtoReflect.Descriptor instead. +func (*SocketOptionsOverride) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{1} +} + +func (x *SocketOptionsOverride) GetSocketOptions() []*SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +// The `socket type `_ to apply the socket option to. +// Only one field should be set. If multiple fields are set, the precedence order will determine +// the selected one. If none of the fields is set, the socket option will be applied to all socket types. +// +// For example: +// If :ref:`stream ` is set, +// it takes precedence over :ref:`datagram `. +type SocketOption_SocketType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Apply the socket option to the stream socket type. + Stream *SocketOption_SocketType_Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + // Apply the socket option to the datagram socket type. + Datagram *SocketOption_SocketType_Datagram `protobuf:"bytes,2,opt,name=datagram,proto3" json:"datagram,omitempty"` +} + +func (x *SocketOption_SocketType) Reset() { + *x = SocketOption_SocketType{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType) ProtoMessage() {} + +func (x *SocketOption_SocketType) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType.ProtoReflect.Descriptor instead. 
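+
+// Illustrative sketch, not part of the generated file: the Go equivalent of the
+// JSON example in the SocketOption comment above. Level 1 / name 9 correspond to
+// SOL_SOCKET / SO_KEEPALIVE on Linux; the numeric values are platform-specific.
+func exampleKeepaliveSocketOption() *SocketOption {
+	return &SocketOption{
+		Description: "support tcp keep alive",
+		Level:       1,
+		Name:        9,
+		// int_value and buf_value form a oneof; SO_KEEPALIVE takes an int flag.
+		Value: &SocketOption_IntValue{IntValue: 1},
+		// Apply before bind(); STATE_PREBIND is the only value currently valid
+		// when the option is used in BindConfig.
+		State: SocketOption_STATE_PREBIND,
+	}
+}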
+func (*SocketOption_SocketType) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SocketOption_SocketType) GetStream() *SocketOption_SocketType_Stream { + if x != nil { + return x.Stream + } + return nil +} + +func (x *SocketOption_SocketType) GetDatagram() *SocketOption_SocketType_Datagram { + if x != nil { + return x.Datagram + } + return nil +} + +// The stream socket type. +type SocketOption_SocketType_Stream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Stream) Reset() { + *x = SocketOption_SocketType_Stream{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Stream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Stream) ProtoMessage() {} + +func (x *SocketOption_SocketType_Stream) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Stream.ProtoReflect.Descriptor instead. +func (*SocketOption_SocketType_Stream) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// The datagram socket type. +type SocketOption_SocketType_Datagram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Datagram) Reset() { + *x = SocketOption_SocketType_Datagram{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Datagram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Datagram) ProtoMessage() {} + +func (x *SocketOption_SocketType_Datagram) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Datagram.ProtoReflect.Descriptor instead. 
+func (*SocketOption_SocketType_Datagram) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 1} +} + +var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x04, 0x0a, 0x0c, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, + 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x62, 0x75, 0x66, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x75, + 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x0a, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 
0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x06, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, + 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0x08, 0x0a, 0x06, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x1a, 0x0a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, + 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, + 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_core_v3_socket_option_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_socket_option_proto_rawDescData = file_envoy_config_core_v3_socket_option_proto_rawDesc +) + +func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_socket_option_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_socket_option_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_socket_option_proto_rawDescData) + }) + return 
file_envoy_config_core_v3_socket_option_proto_rawDescData +} + +var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{ + (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState + (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption + (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride + (*SocketOption_SocketType)(nil), // 3: envoy.config.core.v3.SocketOption.SocketType + (*SocketOption_SocketType_Stream)(nil), // 4: envoy.config.core.v3.SocketOption.SocketType.Stream + (*SocketOption_SocketType_Datagram)(nil), // 5: envoy.config.core.v3.SocketOption.SocketType.Datagram +} +var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState + 3, // 1: envoy.config.core.v3.SocketOption.type:type_name -> envoy.config.core.v3.SocketOption.SocketType + 1, // 2: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption + 4, // 3: envoy.config.core.v3.SocketOption.SocketType.stream:type_name -> envoy.config.core.v3.SocketOption.SocketType.Stream + 5, // 4: envoy.config.core.v3.SocketOption.SocketType.datagram:type_name -> envoy.config.core.v3.SocketOption.SocketType.Datagram + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_socket_option_proto_init() } +func file_envoy_config_core_v3_socket_option_proto_init() { + if File_envoy_config_core_v3_socket_option_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_socket_option_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOptionsOverride); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Stream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Datagram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = 
[]interface{}{ + (*SocketOption_IntValue)(nil), + (*SocketOption_BufValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_socket_option_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_socket_option_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_socket_option_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_socket_option_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_socket_option_proto = out.File + file_envoy_config_core_v3_socket_option_proto_rawDesc = nil + file_envoy_config_core_v3_socket_option_proto_goTypes = nil + file_envoy_config_core_v3_socket_option_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go new file mode 100644 index 000000000..944e08984 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go @@ -0,0 +1,728 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SocketOption with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *SocketOption) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SocketOptionMultiError, or +// nil if none found. 
+func (m *SocketOption) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Description + + // no validation rules for Level + + // no validation rules for Name + + if _, ok := SocketOption_SocketState_name[int32(m.GetState())]; !ok { + err := SocketOptionValidationError{ + field: "State", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetType()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetType()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofValuePresent := false + switch v := m.Value.(type) { + case *SocketOption_IntValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true + // no validation rules for IntValue + case *SocketOption_BufValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true + // no validation rules for BufValue + default: + _ = v // ensures v is used + } + if !oneofValuePresent { + err := SocketOptionValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SocketOptionMultiError(errors) + } + + return nil +} + +// SocketOptionMultiError is an error wrapping multiple validation errors +// returned by SocketOption.ValidateAll() if the designated constraints aren't met. +type SocketOptionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOptionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOptionMultiError) AllErrors() []error { return m } + +// SocketOptionValidationError is the validation error returned by +// SocketOption.Validate if the designated constraints aren't met. +type SocketOptionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOptionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOptionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOptionValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SocketOptionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketOptionValidationError) ErrorName() string { return "SocketOptionValidationError" } + +// Error satisfies the builtin error interface +func (e SocketOptionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOptionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOptionValidationError{} + +// Validate checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOptionsOverride) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOptionsOverrideMultiError, or nil if none found. +func (m *SocketOptionsOverride) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOptionsOverride) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SocketOptionsOverrideMultiError(errors) + } + + return nil +} + +// SocketOptionsOverrideMultiError is an error wrapping multiple validation +// errors returned by SocketOptionsOverride.ValidateAll() if the designated +// constraints aren't met. +type SocketOptionsOverrideMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOptionsOverrideMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOptionsOverrideMultiError) AllErrors() []error { return m } + +// SocketOptionsOverrideValidationError is the validation error returned by +// SocketOptionsOverride.Validate if the designated constraints aren't met. +type SocketOptionsOverrideValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e SocketOptionsOverrideValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOptionsOverrideValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOptionsOverrideValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOptionsOverrideValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketOptionsOverrideValidationError) ErrorName() string { + return "SocketOptionsOverrideValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOptionsOverrideValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOptionsOverride.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOptionsOverrideValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOptionsOverrideValidationError{} + +// Validate checks the field values on SocketOption_SocketType with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOption_SocketTypeMultiError, or nil if none found. 
+func (m *SocketOption_SocketType) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetStream()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStream()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDatagram()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDatagram()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SocketOption_SocketTypeMultiError(errors) + } + + return nil +} + +// SocketOption_SocketTypeMultiError is an error wrapping multiple validation +// errors returned by SocketOption_SocketType.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketTypeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketTypeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketTypeMultiError) AllErrors() []error { return m } + +// SocketOption_SocketTypeValidationError is the validation error returned by +// SocketOption_SocketType.Validate if the designated constraints aren't met. +type SocketOption_SocketTypeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketTypeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketTypeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketTypeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketTypeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketTypeValidationError) ErrorName() string { + return "SocketOption_SocketTypeValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketTypeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketTypeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketTypeValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Stream with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType_Stream) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Stream with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_StreamMultiError, or nil if none found. +func (m *SocketOption_SocketType_Stream) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Stream) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_StreamMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_StreamMultiError is an error wrapping multiple +// validation errors returned by SocketOption_SocketType_Stream.ValidateAll() +// if the designated constraints aren't met. +type SocketOption_SocketType_StreamMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_StreamMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_StreamMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_StreamValidationError is the validation error +// returned by SocketOption_SocketType_Stream.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_StreamValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_StreamValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_StreamValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_StreamValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_StreamValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_StreamValidationError) ErrorName() string { + return "SocketOption_SocketType_StreamValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_StreamValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Stream.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_StreamValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_StreamValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *SocketOption_SocketType_Datagram) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_DatagramMultiError, or nil if none found. +func (m *SocketOption_SocketType_Datagram) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Datagram) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_DatagramMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_DatagramMultiError is an error wrapping multiple +// validation errors returned by +// SocketOption_SocketType_Datagram.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_DatagramMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_DatagramMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_DatagramValidationError is the validation error +// returned by SocketOption_SocketType_Datagram.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_DatagramValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_DatagramValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_DatagramValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_DatagramValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_DatagramValidationError) ErrorName() string { + return "SocketOption_SocketType_DatagramValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_DatagramValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Datagram.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_DatagramValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_DatagramValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go new file mode 100644 index 000000000..75f5db512 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go @@ -0,0 +1,391 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SocketOption_SocketType_Stream) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Stream) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Stream) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Datagram) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *SocketOption_SocketType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Datagram != nil { + size, err := m.Datagram.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Stream != nil { + size, err := m.Stream.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Type != nil { + size, err := m.Type.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.Value.(*SocketOption_BufValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Value.(*SocketOption_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Name != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Name)) + i-- + dAtA[i] = 0x18 + } + if m.Level != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *SocketOption_BufValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_BufValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BufValue) + copy(dAtA[i:], m.BufValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BufValue))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *SocketOptionsOverride) MarshalVTStrict() (dAtA 
[]byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOptionsOverride) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOptionsOverride) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType_Datagram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stream != nil { + l = m.Stream.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Datagram != nil { + l = m.Datagram.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Level != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Level)) + } + if m.Name != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Name)) + } + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.Type != nil { + l = m.Type.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *SocketOption_BufValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BufValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketOptionsOverride) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go new file mode 100644 index 000000000..8c7fda3a1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go @@ -0,0 +1,439 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Optional configuration options to be used with json_format. +type JsonFormatOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The output JSON string properties will be sorted. + SortProperties bool `protobuf:"varint,1,opt,name=sort_properties,json=sortProperties,proto3" json:"sort_properties,omitempty"` +} + +func (x *JsonFormatOptions) Reset() { + *x = JsonFormatOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JsonFormatOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JsonFormatOptions) ProtoMessage() {} + +func (x *JsonFormatOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JsonFormatOptions.ProtoReflect.Descriptor instead. +func (*JsonFormatOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{0} +} + +func (x *JsonFormatOptions) GetSortProperties() bool { + if x != nil { + return x.SortProperties + } + return false +} + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +// [#next-free-field: 8] +type SubstitutionFormatString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Format: + // + // *SubstitutionFormatString_TextFormat + // *SubstitutionFormatString_JsonFormat + // *SubstitutionFormatString_TextFormatSource + Format isSubstitutionFormatString_Format `protobuf_oneof:"format"` + // If set to true, when command operators are evaluated to null, + // + // - for “text_format“, the output of the empty operator is changed from “-“ to an + // empty string, so that empty values are omitted entirely. + // - for “json_format“ the keys with null values are omitted in the output structure. + OmitEmptyValues bool `protobuf:"varint,3,opt,name=omit_empty_values,json=omitEmptyValues,proto3" json:"omit_empty_values,omitempty"` + // Specify a “content_type“ field. + // If this field is not set then “text/plain“ is used for “text_format“ and + // “application/json“ is used for “json_format“. + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Specifies a collection of Formatter plugins that can be called from the access log configuration. + // See the formatters extensions documentation for details. + // [#extension-category: envoy.formatter] + Formatters []*TypedExtensionConfig `protobuf:"bytes,6,rep,name=formatters,proto3" json:"formatters,omitempty"` + // If json_format is used, the options will be applied to the output JSON string. + JsonFormatOptions *JsonFormatOptions `protobuf:"bytes,7,opt,name=json_format_options,json=jsonFormatOptions,proto3" json:"json_format_options,omitempty"` +} + +func (x *SubstitutionFormatString) Reset() { + *x = SubstitutionFormatString{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubstitutionFormatString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubstitutionFormatString) ProtoMessage() {} + +func (x *SubstitutionFormatString) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubstitutionFormatString.ProtoReflect.Descriptor instead. +func (*SubstitutionFormatString) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{1} +} + +func (m *SubstitutionFormatString) GetFormat() isSubstitutionFormatString_Format { + if m != nil { + return m.Format + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/substitution_format_string.proto. +func (x *SubstitutionFormatString) GetTextFormat() string { + if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormat); ok { + return x.TextFormat + } + return "" +} + +func (x *SubstitutionFormatString) GetJsonFormat() *structpb.Struct { + if x, ok := x.GetFormat().(*SubstitutionFormatString_JsonFormat); ok { + return x.JsonFormat + } + return nil +} + +func (x *SubstitutionFormatString) GetTextFormatSource() *DataSource { + if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormatSource); ok { + return x.TextFormatSource + } + return nil +} + +func (x *SubstitutionFormatString) GetOmitEmptyValues() bool { + if x != nil { + return x.OmitEmptyValues + } + return false +} + +func (x *SubstitutionFormatString) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *SubstitutionFormatString) GetFormatters() []*TypedExtensionConfig { + if x != nil { + return x.Formatters + } + return nil +} + +func (x *SubstitutionFormatString) GetJsonFormatOptions() *JsonFormatOptions { + if x != nil { + return x.JsonFormatOptions + } + return nil +} + +type isSubstitutionFormatString_Format interface { + isSubstitutionFormatString_Format() +} + +type SubstitutionFormatString_TextFormat struct { + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting “text_format“ like below, + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. code-block:: text + // + // upstream connect error:503:path=/foo + // + // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/substitution_format_string.proto. + TextFormat string `protobuf:"bytes,1,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +type SubstitutionFormatString_JsonFormat struct { + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + JsonFormat *structpb.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"` +} + +type SubstitutionFormatString_TextFormatSource struct { + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting “text_format“ like below, + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format_source: + // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. 
code-block:: text + // + // upstream connect error:503:path=/foo + TextFormatSource *DataSource `protobuf:"bytes,5,opt,name=text_format_source,json=textFormatSource,proto3,oneof"` +} + +func (*SubstitutionFormatString_TextFormat) isSubstitutionFormatString_Format() {} + +func (*SubstitutionFormatString_JsonFormat) isSubstitutionFormatString_Format() {} + +func (*SubstitutionFormatString_TextFormatSource) isSubstitutionFormatString_Format() {} + +var File_envoy_config_core_v3_substitution_format_string_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = []byte{ + 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x3c, 0x0a, 0x11, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, + 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xf2, 0x03, + 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0b, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, + 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 
0x61, 0x74, + 0x12, 0x50, 0x0a, 0x12, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x10, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, + 0x6d, 0x69, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2e, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4a, + 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x13, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x11, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x42, 0x0d, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x1d, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = file_envoy_config_core_v3_substitution_format_string_proto_rawDesc +) + +func file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce.Do(func() { + 
file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_substitution_format_string_proto_rawDescData) + }) + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescData +} + +var file_envoy_config_core_v3_substitution_format_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_substitution_format_string_proto_goTypes = []interface{}{ + (*JsonFormatOptions)(nil), // 0: envoy.config.core.v3.JsonFormatOptions + (*SubstitutionFormatString)(nil), // 1: envoy.config.core.v3.SubstitutionFormatString + (*structpb.Struct)(nil), // 2: google.protobuf.Struct + (*DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = []int32{ + 2, // 0: envoy.config.core.v3.SubstitutionFormatString.json_format:type_name -> google.protobuf.Struct + 3, // 1: envoy.config.core.v3.SubstitutionFormatString.text_format_source:type_name -> envoy.config.core.v3.DataSource + 4, // 2: envoy.config.core.v3.SubstitutionFormatString.formatters:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 3: envoy.config.core.v3.SubstitutionFormatString.json_format_options:type_name -> envoy.config.core.v3.JsonFormatOptions + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_substitution_format_string_proto_init() } +func file_envoy_config_core_v3_substitution_format_string_proto_init() { + if File_envoy_config_core_v3_substitution_format_string_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_extension_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonFormatOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubstitutionFormatString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*SubstitutionFormatString_TextFormat)(nil), + (*SubstitutionFormatString_JsonFormat)(nil), + (*SubstitutionFormatString_TextFormatSource)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_substitution_format_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_substitution_format_string_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_substitution_format_string_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_substitution_format_string_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_substitution_format_string_proto = out.File + 
file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = nil + file_envoy_config_core_v3_substitution_format_string_proto_goTypes = nil + file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go new file mode 100644 index 000000000..f21251e32 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go @@ -0,0 +1,445 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on JsonFormatOptions with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *JsonFormatOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on JsonFormatOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// JsonFormatOptionsMultiError, or nil if none found. +func (m *JsonFormatOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *JsonFormatOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SortProperties + + if len(errors) > 0 { + return JsonFormatOptionsMultiError(errors) + } + + return nil +} + +// JsonFormatOptionsMultiError is an error wrapping multiple validation errors +// returned by JsonFormatOptions.ValidateAll() if the designated constraints +// aren't met. +type JsonFormatOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m JsonFormatOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m JsonFormatOptionsMultiError) AllErrors() []error { return m } + +// JsonFormatOptionsValidationError is the validation error returned by +// JsonFormatOptions.Validate if the designated constraints aren't met. +type JsonFormatOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e JsonFormatOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e JsonFormatOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e JsonFormatOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
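For orientation, here is a minimal, illustrative Go sketch of how the json_format variant documented on SubstitutionFormatString above can be populated; it is not part of the generated sources in this diff, the import aliases are assumptions, and the status/message keys simply mirror the example in the field comment.

package main

import (
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Mirrors the json_format example in the field comment above: command operators
	// are plain string values inside a google.protobuf.Struct.
	jsonFields, err := structpb.NewStruct(map[string]interface{}{
		"status":  "%RESPONSE_CODE%",
		"message": "%LOCAL_REPLY_BODY%",
	})
	if err != nil {
		panic(err) // placeholder error handling for the sketch
	}
	format := &corev3.SubstitutionFormatString{
		Format: &corev3.SubstitutionFormatString_JsonFormat{JsonFormat: jsonFields},
	}
	// The oneof is set and json_format is non-nil, so the generated rules pass.
	_ = format.ValidateAll()
}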
+func (e JsonFormatOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e JsonFormatOptionsValidationError) ErrorName() string { + return "JsonFormatOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e JsonFormatOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sJsonFormatOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = JsonFormatOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = JsonFormatOptionsValidationError{} + +// Validate checks the field values on SubstitutionFormatString with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubstitutionFormatString) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubstitutionFormatString with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubstitutionFormatStringMultiError, or nil if none found. +func (m *SubstitutionFormatString) ValidateAll() error { + return m.validate(true) +} + +func (m *SubstitutionFormatString) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for OmitEmptyValues + + if !_SubstitutionFormatString_ContentType_Pattern.MatchString(m.GetContentType()) { + err := SubstitutionFormatStringValidationError{ + field: "ContentType", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFormatters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetJsonFormatOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormatOptions()).(interface{ Validate() error }); ok { 
+ if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofFormatPresent := false + switch v := m.Format.(type) { + case *SubstitutionFormatString_TextFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + // no validation rules for TextFormat + case *SubstitutionFormatString_JsonFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + + if m.GetJsonFormat() == nil { + err := SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetJsonFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *SubstitutionFormatString_TextFormatSource: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + + if all { + switch v := interface{}(m.GetTextFormatSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTextFormatSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFormatPresent { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SubstitutionFormatStringMultiError(errors) + } + + return nil +} + +// SubstitutionFormatStringMultiError is an error wrapping multiple validation +// errors returned by SubstitutionFormatString.ValidateAll() if the designated +// constraints aren't met. 
+type SubstitutionFormatStringMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SubstitutionFormatStringMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubstitutionFormatStringMultiError) AllErrors() []error { return m } + +// SubstitutionFormatStringValidationError is the validation error returned by +// SubstitutionFormatString.Validate if the designated constraints aren't met. +type SubstitutionFormatStringValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubstitutionFormatStringValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SubstitutionFormatStringValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubstitutionFormatStringValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubstitutionFormatStringValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubstitutionFormatStringValidationError) ErrorName() string { + return "SubstitutionFormatStringValidationError" +} + +// Error satisfies the builtin error interface +func (e SubstitutionFormatStringValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubstitutionFormatString.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubstitutionFormatStringValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubstitutionFormatStringValidationError{} + +var _SubstitutionFormatString_ContentType_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go new file mode 100644 index 000000000..2887f4d1a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go @@ -0,0 +1,298 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *JsonFormatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JsonFormatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *JsonFormatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SortProperties { + i-- + if m.SortProperties { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubstitutionFormatString) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubstitutionFormatString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.JsonFormatOptions != nil { + size, err := m.JsonFormatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.Formatters) > 0 { + for iNdEx := len(m.Formatters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Formatters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormatSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ContentType) > 0 { + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + } + if m.OmitEmptyValues { + i-- + if m.OmitEmptyValues { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.Format.(*SubstitutionFormatString_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func 
(m *SubstitutionFormatString_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := (*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SubstitutionFormatString_TextFormatSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormatSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TextFormatSource != nil { + size, err := m.TextFormatSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *JsonFormatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SortProperties { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Format.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OmitEmptyValues { + n += 2 + } + l = len(m.ContentType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Formatters) > 0 { + for _, e := range m.Formatters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.JsonFormatOptions != nil { + l = m.JsonFormatOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubstitutionFormatString_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SubstitutionFormatString_TextFormatSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TextFormatSource != nil { + l = m.TextFormatSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go new file mode 100644 index 000000000..ebef4e642 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Generic UDP socket configuration. +type UdpSocketConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate + // more memory per socket. Received datagrams above this size will be dropped. If not set + // defaults to 1500 bytes. + MaxRxDatagramSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"` + // Configures whether Generic Receive Offload (GRO) + // _ is preferred when reading from the + // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. + // This option affects performance but not functionality. If GRO is not supported by the operating + // system, non-GRO receive will be used. + PreferGro *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"` +} + +func (x *UdpSocketConfig) Reset() { + *x = UdpSocketConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UdpSocketConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UdpSocketConfig) ProtoMessage() {} + +func (x *UdpSocketConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UdpSocketConfig.ProtoReflect.Descriptor instead. 
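As a usage note, the sketch below shows how the UdpSocketConfig message described above might be constructed from application code; it is illustrative only (not part of the generated file), and the 9000-byte value is a hypothetical jumbo-datagram choice within the validated (0, 65536) range.

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Per the field docs above: datagrams larger than max_rx_datagram_size are dropped,
	// and leaving it unset defaults to 1500 bytes; prefer_gro falls back to non-GRO
	// receive when the operating system lacks GRO support.
	cfg := &corev3.UdpSocketConfig{
		MaxRxDatagramSize: wrapperspb.UInt64(9000), // hypothetical value for illustration
		PreferGro:         wrapperspb.Bool(true),
	}
	fmt.Println(cfg.GetMaxRxDatagramSize().GetValue(), cfg.GetPreferGro().GetValue())
}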
+func (*UdpSocketConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP(), []int{0} +} + +func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaxRxDatagramSize + } + return nil +} + +func (x *UdpSocketConfig) GetPreferGro() *wrapperspb.BoolValue { + if x != nil { + return x.PreferGro + } + return nil +} + +var File_envoy_config_core_v3_udp_socket_config_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, + 0x0f, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x5a, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x67, + 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x32, 0x06, 0x10, 0x80, 0x80, 0x04, 0x20, 0x00, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x78, + 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x42, 0x88, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce 
sync.Once + file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = file_envoy_config_core_v3_udp_socket_config_proto_rawDesc +) + +func file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_udp_socket_config_proto_rawDescData) + }) + return file_envoy_config_core_v3_udp_socket_config_proto_rawDescData +} + +var file_envoy_config_core_v3_udp_socket_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_udp_socket_config_proto_goTypes = []interface{}{ + (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig + (*wrapperspb.UInt64Value)(nil), // 1: google.protobuf.UInt64Value + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue +} +var file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.UdpSocketConfig.max_rx_datagram_size:type_name -> google.protobuf.UInt64Value + 2, // 1: envoy.config.core.v3.UdpSocketConfig.prefer_gro:type_name -> google.protobuf.BoolValue + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_udp_socket_config_proto_init() } +func file_envoy_config_core_v3_udp_socket_config_proto_init() { + if File_envoy_config_core_v3_udp_socket_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UdpSocketConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_udp_socket_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_udp_socket_config_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_udp_socket_config_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_udp_socket_config_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_udp_socket_config_proto = out.File + file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = nil + file_envoy_config_core_v3_udp_socket_config_proto_goTypes = nil + file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go new file mode 100644 index 000000000..f977eda2f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go @@ -0,0 +1,181 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UdpSocketConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *UdpSocketConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UdpSocketConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UdpSocketConfigMultiError, or nil if none found. +func (m *UdpSocketConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UdpSocketConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMaxRxDatagramSize(); wrapper != nil { + + if val := wrapper.GetValue(); val <= 0 || val >= 65536 { + err := UdpSocketConfigValidationError{ + field: "MaxRxDatagramSize", + reason: "value must be inside range (0, 65536)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetPreferGro()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPreferGro()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UdpSocketConfigMultiError(errors) + } + + return nil +} + +// UdpSocketConfigMultiError is an error wrapping multiple validation errors +// returned by UdpSocketConfig.ValidateAll() if the designated constraints +// aren't met. +type UdpSocketConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UdpSocketConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UdpSocketConfigMultiError) AllErrors() []error { return m } + +// UdpSocketConfigValidationError is the validation error returned by +// UdpSocketConfig.Validate if the designated constraints aren't met. +type UdpSocketConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UdpSocketConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e UdpSocketConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UdpSocketConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UdpSocketConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UdpSocketConfigValidationError) ErrorName() string { return "UdpSocketConfigValidationError" } + +// Error satisfies the builtin error interface +func (e UdpSocketConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUdpSocketConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UdpSocketConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UdpSocketConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go new file mode 100644 index 000000000..2809993ae --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpSocketConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpSocketConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpSocketConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PreferGro != nil { + size, err := (*wrapperspb.BoolValue)(m.PreferGro).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxRxDatagramSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UdpSocketConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxRxDatagramSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreferGro != nil { + l = (*wrapperspb.BoolValue)(m.PreferGro).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go new file mode 100644 index 000000000..96122a01d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go @@ -0,0 +1,508 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. +// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. 
First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. +// [#next-free-field: 6] +type ClusterLoadAssignment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // List of endpoints to load balance to. + Endpoints []*LocalityLbEndpoints `protobuf:"bytes,2,rep,name=endpoints,proto3" json:"endpoints,omitempty"` + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + // [#not-implemented-hide:] + NamedEndpoints map[string]*Endpoint `protobuf:"bytes,5,rep,name=named_endpoints,json=namedEndpoints,proto3" json:"named_endpoints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Load balancing policy settings. + Policy *ClusterLoadAssignment_Policy `protobuf:"bytes,4,opt,name=policy,proto3" json:"policy,omitempty"` +} + +func (x *ClusterLoadAssignment) Reset() { + *x = ClusterLoadAssignment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment) ProtoMessage() {} + +func (x *ClusterLoadAssignment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment.ProtoReflect.Descriptor instead. +func (*ClusterLoadAssignment) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterLoadAssignment) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterLoadAssignment) GetEndpoints() []*LocalityLbEndpoints { + if x != nil { + return x.Endpoints + } + return nil +} + +func (x *ClusterLoadAssignment) GetNamedEndpoints() map[string]*Endpoint { + if x != nil { + return x.NamedEndpoints + } + return nil +} + +func (x *ClusterLoadAssignment) GetPolicy() *ClusterLoadAssignment_Policy { + if x != nil { + return x.Policy + } + return nil +} + +// Load balancing policy settings. +// [#next-free-field: 7] +type ClusterLoadAssignment_Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Action to trim the overall incoming traffic to protect the upstream + // hosts. This action allows protection in case the hosts are unable to + // recover from an outage, or unable to autoscale or unable to handle + // incoming traffic volume for any reason. + // + // At the client each category is applied one after the other to generate + // the 'actual' drop percentage on all outgoing traffic. For example: + // + // .. 
code-block:: json + // + // { "drop_overloads": [ + // { "category": "throttle", "drop_percentage": 60 } + // { "category": "lb", "drop_percentage": 50 } + // ]} + // + // The actual drop percentages applied to the traffic at the clients will be + // + // "throttle"_drop = 60% + // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. + // actual_outgoing_load = 20% // remaining after applying all categories. + // + // Envoy supports only one element and will NACK if more than one element is present. + // Other xDS-capable data planes will not necessarily have this limitation. + // + // In Envoy, this “drop_overloads“ config can be overridden by a runtime key + // "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to + // any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%. + // When both “drop_overloads“ config and "load_balancing_policy.drop_overload_limit" + // setting are in place, the min of these two wins. + DropOverloads []*ClusterLoadAssignment_Policy_DropOverload `protobuf:"bytes,2,rep,name=drop_overloads,json=dropOverloads,proto3" json:"drop_overloads,omitempty"` + // Priority levels and localities are considered overprovisioned with this + // factor (in percentage). This means that we don't consider a priority + // level or locality unhealthy until the fraction of healthy hosts + // multiplied by the overprovisioning factor drops below 100. + // With the default value 140(1.4), Envoy doesn't consider a priority level + // or a locality unhealthy until their percentage of healthy hosts drops + // below 72%. For example: + // + // .. code-block:: json + // + // { "overprovisioning_factor": 100 } + // + // Read more at :ref:`priority levels ` and + // :ref:`localities `. + OverprovisioningFactor *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=overprovisioning_factor,json=overprovisioningFactor,proto3" json:"overprovisioning_factor,omitempty"` + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + EndpointStaleAfter *durationpb.Duration `protobuf:"bytes,4,opt,name=endpoint_stale_after,json=endpointStaleAfter,proto3" json:"endpoint_stale_after,omitempty"` + // If true, use the :ref:`load balancing weight + // ` of healthy and unhealthy + // hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts + // to determine the health of the priority level, or in other words assume each host has a weight of 1 for + // this calculation. + // + // Note: this is not currently implemented for + // :ref:`locality weighted load balancing `. 
+ WeightedPriorityHealth bool `protobuf:"varint,6,opt,name=weighted_priority_health,json=weightedPriorityHealth,proto3" json:"weighted_priority_health,omitempty"` +} + +func (x *ClusterLoadAssignment_Policy) Reset() { + *x = ClusterLoadAssignment_Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment_Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment_Policy) ProtoMessage() {} + +func (x *ClusterLoadAssignment_Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment_Policy.ProtoReflect.Descriptor instead. +func (*ClusterLoadAssignment_Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ClusterLoadAssignment_Policy) GetDropOverloads() []*ClusterLoadAssignment_Policy_DropOverload { + if x != nil { + return x.DropOverloads + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetOverprovisioningFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.OverprovisioningFactor + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetEndpointStaleAfter() *durationpb.Duration { + if x != nil { + return x.EndpointStaleAfter + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetWeightedPriorityHealth() bool { + if x != nil { + return x.WeightedPriorityHealth + } + return false +} + +type ClusterLoadAssignment_Policy_DropOverload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the policy specifying the drop. + Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` + // Percentage of traffic that should be dropped for the category. + DropPercentage *v3.FractionalPercent `protobuf:"bytes,2,opt,name=drop_percentage,json=dropPercentage,proto3" json:"drop_percentage,omitempty"` +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) Reset() { + *x = ClusterLoadAssignment_Policy_DropOverload{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment_Policy_DropOverload) ProtoMessage() {} + +func (x *ClusterLoadAssignment_Policy_DropOverload) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment_Policy_DropOverload.ProtoReflect.Descriptor instead. 
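To make the Policy documentation above concrete, the following sketch builds a ClusterLoadAssignment with a single drop_overloads entry matching the "throttle" 60% example from the comment (Envoy NACKs more than one element) and the documented default overprovisioning factor of 140. It is an assumed usage example, not part of the generated sources, and the cluster name is hypothetical.

package main

import (
	endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// One drop category at 60/100; with a second category the remaining traffic would be
	// reduced multiplicatively, as worked through in the Policy comment above.
	_ = &endpointv3.ClusterLoadAssignment{
		ClusterName: "example_cluster", // hypothetical name
		Policy: &endpointv3.ClusterLoadAssignment_Policy{
			DropOverloads: []*endpointv3.ClusterLoadAssignment_Policy_DropOverload{{
				Category: "throttle",
				DropPercentage: &typev3.FractionalPercent{
					Numerator:   60,
					Denominator: typev3.FractionalPercent_HUNDRED,
				},
			}},
			// 140 corresponds to the documented default factor of 1.4.
			OverprovisioningFactor: wrapperspb.UInt32(140),
		},
	}
}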
+func (*ClusterLoadAssignment_Policy_DropOverload) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) GetDropPercentage() *v3.FractionalPercent { + if x != nil { + return x.DropPercentage + } + return nil +} + +var File_envoy_config_endpoint_v3_endpoint_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_endpoint_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe0, 0x08, 0x0a, 0x15, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, + 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, + 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x12, 0x4e, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x1a, 0xfd, 0x04, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x6a, 0x0a, 0x0e, 0x64, + 0x72, 0x6f, 0x70, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x72, 0x6f, 0x70, + 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0d, 0x64, 0x72, 0x6f, 0x70, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x5e, 0x0a, 0x17, 0x6f, 0x76, 0x65, 0x72, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, + 0x16, 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x55, 0x0a, 0x14, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x38, + 0x0a, 0x18, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x16, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x1a, 0xbd, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, + 0x70, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x49, + 0x0a, 0x0f, 0x64, 0x72, 
0x6f, 0x70, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x72, 0x6f, 0x70, + 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x18, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x1a, 0x65, 0x0a, 0x13, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_endpoint_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_endpoint_proto_rawDescData = file_envoy_config_endpoint_v3_endpoint_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_endpoint_proto_rawDescOnce.Do(func() { + 
file_envoy_config_endpoint_v3_endpoint_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_endpoint_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescData +} + +var file_envoy_config_endpoint_v3_endpoint_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_config_endpoint_v3_endpoint_proto_goTypes = []interface{}{ + (*ClusterLoadAssignment)(nil), // 0: envoy.config.endpoint.v3.ClusterLoadAssignment + (*ClusterLoadAssignment_Policy)(nil), // 1: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy + nil, // 2: envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry + (*ClusterLoadAssignment_Policy_DropOverload)(nil), // 3: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload + (*LocalityLbEndpoints)(nil), // 4: envoy.config.endpoint.v3.LocalityLbEndpoints + (*wrapperspb.UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*Endpoint)(nil), // 7: envoy.config.endpoint.v3.Endpoint + (*v3.FractionalPercent)(nil), // 8: envoy.type.v3.FractionalPercent +} +var file_envoy_config_endpoint_v3_endpoint_proto_depIdxs = []int32{ + 4, // 0: envoy.config.endpoint.v3.ClusterLoadAssignment.endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints + 2, // 1: envoy.config.endpoint.v3.ClusterLoadAssignment.named_endpoints:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry + 1, // 2: envoy.config.endpoint.v3.ClusterLoadAssignment.policy:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.Policy + 3, // 3: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.drop_overloads:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload + 5, // 4: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.overprovisioning_factor:type_name -> google.protobuf.UInt32Value + 6, // 5: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.endpoint_stale_after:type_name -> google.protobuf.Duration + 7, // 6: envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry.value:type_name -> envoy.config.endpoint.v3.Endpoint + 8, // 7: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload.drop_percentage:type_name -> envoy.type.v3.FractionalPercent + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_endpoint_proto_init() } +func file_envoy_config_endpoint_v3_endpoint_proto_init() { + if File_envoy_config_endpoint_v3_endpoint_proto != nil { + return + } + file_envoy_config_endpoint_v3_endpoint_components_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterLoadAssignment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterLoadAssignment_Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*ClusterLoadAssignment_Policy_DropOverload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_endpoint_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_endpoint_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_endpoint_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_endpoint_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_endpoint_proto = out.File + file_envoy_config_endpoint_v3_endpoint_proto_rawDesc = nil + file_envoy_config_endpoint_v3_endpoint_proto_goTypes = nil + file_envoy_config_endpoint_v3_endpoint_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go new file mode 100644 index 000000000..1b9f2c1fe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go @@ -0,0 +1,589 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterLoadAssignment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterLoadAssignment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterLoadAssignmentMultiError, or nil if none found. 
+func (m *ClusterLoadAssignment) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := ClusterLoadAssignmentValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + { + sorted_keys := make([]string, len(m.GetNamedEndpoints())) + i := 0 + for key := range m.GetNamedEndpoints() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetNamedEndpoints()[key] + _ = val + + // no validation rules for NamedEndpoints[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterLoadAssignmentMultiError(errors) + } + + return nil +} + +// 
ClusterLoadAssignmentMultiError is an error wrapping multiple validation +// errors returned by ClusterLoadAssignment.ValidateAll() if the designated +// constraints aren't met. +type ClusterLoadAssignmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterLoadAssignmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignmentMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignmentValidationError is the validation error returned by +// ClusterLoadAssignment.Validate if the designated constraints aren't met. +type ClusterLoadAssignmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterLoadAssignmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignmentValidationError) ErrorName() string { + return "ClusterLoadAssignmentValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignmentValidationError{} + +// Validate checks the field values on ClusterLoadAssignment_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment_Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterLoadAssignment_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterLoadAssignment_PolicyMultiError, or nil if none found. 
+func (m *ClusterLoadAssignment_Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment_Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDropOverloads() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetOverprovisioningFactor(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := ClusterLoadAssignment_PolicyValidationError{ + field: "OverprovisioningFactor", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if d := m.GetEndpointStaleAfter(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterLoadAssignment_PolicyValidationError{ + field: "EndpointStaleAfter", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterLoadAssignment_PolicyValidationError{ + field: "EndpointStaleAfter", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for WeightedPriorityHealth + + if len(errors) > 0 { + return ClusterLoadAssignment_PolicyMultiError(errors) + } + + return nil +} + +// ClusterLoadAssignment_PolicyMultiError is an error wrapping multiple +// validation errors returned by ClusterLoadAssignment_Policy.ValidateAll() if +// the designated constraints aren't met. +type ClusterLoadAssignment_PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterLoadAssignment_PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignment_PolicyMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignment_PolicyValidationError is the validation error returned +// by ClusterLoadAssignment_Policy.Validate if the designated constraints +// aren't met. +type ClusterLoadAssignment_PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignment_PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignment_PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ClusterLoadAssignment_PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignment_PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignment_PolicyValidationError) ErrorName() string { + return "ClusterLoadAssignment_PolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignment_PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment_Policy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignment_PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignment_PolicyValidationError{} + +// Validate checks the field values on +// ClusterLoadAssignment_Policy_DropOverload with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment_Policy_DropOverload) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ClusterLoadAssignment_Policy_DropOverload with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ClusterLoadAssignment_Policy_DropOverloadMultiError, or nil if none found. +func (m *ClusterLoadAssignment_Policy_DropOverload) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCategory()) < 1 { + err := ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "Category", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDropPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDropPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterLoadAssignment_Policy_DropOverloadMultiError(errors) + } + + return nil +} + +// ClusterLoadAssignment_Policy_DropOverloadMultiError is an error wrapping +// multiple validation errors returned by +// ClusterLoadAssignment_Policy_DropOverload.ValidateAll() if the designated +// constraints aren't met. +type ClusterLoadAssignment_Policy_DropOverloadMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterLoadAssignment_Policy_DropOverloadMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignment_Policy_DropOverloadMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignment_Policy_DropOverloadValidationError is the validation +// error returned by ClusterLoadAssignment_Policy_DropOverload.Validate if the +// designated constraints aren't met. +type ClusterLoadAssignment_Policy_DropOverloadValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) ErrorName() string { + return "ClusterLoadAssignment_Policy_DropOverloadValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment_Policy_DropOverload.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignment_Policy_DropOverloadValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignment_Policy_DropOverloadValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go new file mode 100644 index 000000000..4cff3e6df --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go @@ -0,0 +1,1004 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Upstream host identifier. +type Endpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream host address. 
+ // + // .. attention:: + // + // The form of host address depends on the given cluster type. For STATIC or EDS, + // it is expected to be a direct IP address (or something resolvable by the + // specified :ref:`resolver ` + // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, + // and will be resolved via DNS. + Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // The optional health check configuration is used as configuration for the + // health checker to contact the health checked host. + // + // .. attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig *Endpoint_HealthCheckConfig `protobuf:"bytes,2,opt,name=health_check_config,json=healthCheckConfig,proto3" json:"health_check_config,omitempty"` + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"` + // An ordered list of addresses that together with “address“ comprise the + // list of addresses for an endpoint. The address given in the “address“ is + // prepended to this list. It is assumed that the list must already be + // sorted by preference order of the addresses. This will only be supported + // for STATIC and EDS clusters. + AdditionalAddresses []*Endpoint_AdditionalAddress `protobuf:"bytes,4,rep,name=additional_addresses,json=additionalAddresses,proto3" json:"additional_addresses,omitempty"` +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. +func (*Endpoint) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0} +} + +func (x *Endpoint) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Endpoint) GetHealthCheckConfig() *Endpoint_HealthCheckConfig { + if x != nil { + return x.HealthCheckConfig + } + return nil +} + +func (x *Endpoint) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *Endpoint) GetAdditionalAddresses() []*Endpoint_AdditionalAddress { + if x != nil { + return x.AdditionalAddresses + } + return nil +} + +// An Endpoint that Envoy can route traffic to. +// [#next-free-field: 6] +type LbEndpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Upstream host identifier or a named reference. 
+ // + // Types that are assignable to HostIdentifier: + // + // *LbEndpoint_Endpoint + // *LbEndpoint_EndpointName + HostIdentifier isLbEndpoint_HostIdentifier `protobuf_oneof:"host_identifier"` + // Optional health status when known and supplied by EDS server. + HealthStatus v3.HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"health_status,omitempty"` + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as “envoy.lb“. An example boolean key-value pair + // is “canary“, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + Metadata *v3.Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, will be treated as 1. The sum + // of the weights of all endpoints in the endpoint's locality must not + // exceed uint32_t maximal value (4294967295). + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` +} + +func (x *LbEndpoint) Reset() { + *x = LbEndpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LbEndpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LbEndpoint) ProtoMessage() {} + +func (x *LbEndpoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LbEndpoint.ProtoReflect.Descriptor instead. 
+func (*LbEndpoint) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{1} +} + +func (m *LbEndpoint) GetHostIdentifier() isLbEndpoint_HostIdentifier { + if m != nil { + return m.HostIdentifier + } + return nil +} + +func (x *LbEndpoint) GetEndpoint() *Endpoint { + if x, ok := x.GetHostIdentifier().(*LbEndpoint_Endpoint); ok { + return x.Endpoint + } + return nil +} + +func (x *LbEndpoint) GetEndpointName() string { + if x, ok := x.GetHostIdentifier().(*LbEndpoint_EndpointName); ok { + return x.EndpointName + } + return "" +} + +func (x *LbEndpoint) GetHealthStatus() v3.HealthStatus { + if x != nil { + return x.HealthStatus + } + return v3.HealthStatus(0) +} + +func (x *LbEndpoint) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *LbEndpoint) GetLoadBalancingWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.LoadBalancingWeight + } + return nil +} + +type isLbEndpoint_HostIdentifier interface { + isLbEndpoint_HostIdentifier() +} + +type LbEndpoint_Endpoint struct { + Endpoint *Endpoint `protobuf:"bytes,1,opt,name=endpoint,proto3,oneof"` +} + +type LbEndpoint_EndpointName struct { + // [#not-implemented-hide:] + EndpointName string `protobuf:"bytes,5,opt,name=endpoint_name,json=endpointName,proto3,oneof"` +} + +func (*LbEndpoint_Endpoint) isLbEndpoint_HostIdentifier() {} + +func (*LbEndpoint_EndpointName) isLbEndpoint_HostIdentifier() {} + +// [#not-implemented-hide:] +// A configuration for a LEDS collection. +type LedsClusterLocalityConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for the source of LEDS updates for a Locality. + LedsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=leds_config,json=ledsConfig,proto3" json:"leds_config,omitempty"` + // The xDS transport protocol glob collection resource name. + // The service is only supported in delta xDS (incremental) mode. + LedsCollectionName string `protobuf:"bytes,2,opt,name=leds_collection_name,json=ledsCollectionName,proto3" json:"leds_collection_name,omitempty"` +} + +func (x *LedsClusterLocalityConfig) Reset() { + *x = LedsClusterLocalityConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LedsClusterLocalityConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LedsClusterLocalityConfig) ProtoMessage() {} + +func (x *LedsClusterLocalityConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LedsClusterLocalityConfig.ProtoReflect.Descriptor instead. +func (*LedsClusterLocalityConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{2} +} + +func (x *LedsClusterLocalityConfig) GetLedsConfig() *v3.ConfigSource { + if x != nil { + return x.LedsConfig + } + return nil +} + +func (x *LedsClusterLocalityConfig) GetLedsCollectionName() string { + if x != nil { + return x.LedsCollectionName + } + return "" +} + +// A group of endpoints belonging to a Locality. 
+// One can have multiple LocalityLbEndpoints for a locality, but only if +// they have different priorities. +// [#next-free-field: 10] +type LocalityLbEndpoints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies location of where the upstream hosts run. + Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` + // Metadata to provide additional information about the locality endpoints in aggregate. + Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The group of endpoints belonging to the locality specified. + // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + // deprecated and replaced by “load_balancer_endpoints“.] + LbEndpoints []*LbEndpoint `protobuf:"bytes,2,rep,name=lb_endpoints,json=lbEndpoints,proto3" json:"lb_endpoints,omitempty"` + // [#not-implemented-hide:] + // + // Types that are assignable to LbConfig: + // + // *LocalityLbEndpoints_LoadBalancerEndpoints + // *LocalityLbEndpoints_LedsClusterLocalityConfig + LbConfig isLocalityLbEndpoints_LbConfig `protobuf_oneof:"lb_config"` + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. The sum of the weights of all localities at + // the same priority level must not exceed uint32_t maximal value (4294967295). + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specified when locality weighted load balancing is enabled, the locality is + // assigned no load. + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` + // Optional: the priority for this LocalityLbEndpoints. If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event that enough endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. Read more at :ref:`priority levels `. + // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + Priority uint32 `protobuf:"varint,5,opt,name=priority,proto3" json:"priority,omitempty"` + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. 
+ // [#not-implemented-hide:] + Proximity *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=proximity,proto3" json:"proximity,omitempty"` +} + +func (x *LocalityLbEndpoints) Reset() { + *x = LocalityLbEndpoints{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbEndpoints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbEndpoints) ProtoMessage() {} + +func (x *LocalityLbEndpoints) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbEndpoints.ProtoReflect.Descriptor instead. +func (*LocalityLbEndpoints) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{3} +} + +func (x *LocalityLbEndpoints) GetLocality() *v3.Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *LocalityLbEndpoints) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *LocalityLbEndpoints) GetLbEndpoints() []*LbEndpoint { + if x != nil { + return x.LbEndpoints + } + return nil +} + +func (m *LocalityLbEndpoints) GetLbConfig() isLocalityLbEndpoints_LbConfig { + if m != nil { + return m.LbConfig + } + return nil +} + +func (x *LocalityLbEndpoints) GetLoadBalancerEndpoints() *LocalityLbEndpoints_LbEndpointList { + if x, ok := x.GetLbConfig().(*LocalityLbEndpoints_LoadBalancerEndpoints); ok { + return x.LoadBalancerEndpoints + } + return nil +} + +func (x *LocalityLbEndpoints) GetLedsClusterLocalityConfig() *LedsClusterLocalityConfig { + if x, ok := x.GetLbConfig().(*LocalityLbEndpoints_LedsClusterLocalityConfig); ok { + return x.LedsClusterLocalityConfig + } + return nil +} + +func (x *LocalityLbEndpoints) GetLoadBalancingWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.LoadBalancingWeight + } + return nil +} + +func (x *LocalityLbEndpoints) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *LocalityLbEndpoints) GetProximity() *wrapperspb.UInt32Value { + if x != nil { + return x.Proximity + } + return nil +} + +type isLocalityLbEndpoints_LbConfig interface { + isLocalityLbEndpoints_LbConfig() +} + +type LocalityLbEndpoints_LoadBalancerEndpoints struct { + // The group of endpoints belonging to the locality. + // [#comment:TODO(adisuissa): Once LEDS is implemented the “lb_endpoints“ field + // needs to be deprecated.] + LoadBalancerEndpoints *LocalityLbEndpoints_LbEndpointList `protobuf:"bytes,7,opt,name=load_balancer_endpoints,json=loadBalancerEndpoints,proto3,oneof"` +} + +type LocalityLbEndpoints_LedsClusterLocalityConfig struct { + // LEDS Configuration for the current locality. + LedsClusterLocalityConfig *LedsClusterLocalityConfig `protobuf:"bytes,8,opt,name=leds_cluster_locality_config,json=ledsClusterLocalityConfig,proto3,oneof"` +} + +func (*LocalityLbEndpoints_LoadBalancerEndpoints) isLocalityLbEndpoints_LbConfig() {} + +func (*LocalityLbEndpoints_LedsClusterLocalityConfig) isLocalityLbEndpoints_LbConfig() {} + +// The optional health check configuration. 
+type Endpoint_HealthCheckConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + PortValue uint32 `protobuf:"varint,1,opt,name=port_value,json=portValue,proto3" json:"port_value,omitempty"` + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Optional alternative health check host address. + // + // .. attention:: + // + // The form of the health check host address is expected to be a direct IP address. + Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + // Optional flag to control if perform active health check for this endpoint. + // Active health check is enabled by default if there is a health checker. + DisableActiveHealthCheck bool `protobuf:"varint,4,opt,name=disable_active_health_check,json=disableActiveHealthCheck,proto3" json:"disable_active_health_check,omitempty"` +} + +func (x *Endpoint_HealthCheckConfig) Reset() { + *x = Endpoint_HealthCheckConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint_HealthCheckConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint_HealthCheckConfig) ProtoMessage() {} + +func (x *Endpoint_HealthCheckConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint_HealthCheckConfig.ProtoReflect.Descriptor instead. +func (*Endpoint_HealthCheckConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Endpoint_HealthCheckConfig) GetPortValue() uint32 { + if x != nil { + return x.PortValue + } + return 0 +} + +func (x *Endpoint_HealthCheckConfig) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *Endpoint_HealthCheckConfig) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Endpoint_HealthCheckConfig) GetDisableActiveHealthCheck() bool { + if x != nil { + return x.DisableActiveHealthCheck + } + return false +} + +type Endpoint_AdditionalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Additional address that is associated with the endpoint. 
+ Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *Endpoint_AdditionalAddress) Reset() { + *x = Endpoint_AdditionalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint_AdditionalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint_AdditionalAddress) ProtoMessage() {} + +func (x *Endpoint_AdditionalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint_AdditionalAddress.ProtoReflect.Descriptor instead. +func (*Endpoint_AdditionalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Endpoint_AdditionalAddress) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +// [#not-implemented-hide:] +// A list of endpoints of a specific locality. +type LocalityLbEndpoints_LbEndpointList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LbEndpoints []*LbEndpoint `protobuf:"bytes,1,rep,name=lb_endpoints,json=lbEndpoints,proto3" json:"lb_endpoints,omitempty"` +} + +func (x *LocalityLbEndpoints_LbEndpointList) Reset() { + *x = LocalityLbEndpoints_LbEndpointList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbEndpoints_LbEndpointList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbEndpoints_LbEndpointList) ProtoMessage() {} + +func (x *LocalityLbEndpoints_LbEndpointList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbEndpoints_LbEndpointList.ProtoReflect.Descriptor instead. 
+func (*LocalityLbEndpoints_LbEndpointList) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *LocalityLbEndpoints_LbEndpointList) GetLbEndpoints() []*LbEndpoint { + if x != nil { + return x.LbEndpoints + } + return nil +} + +var File_envoy_config_endpoint_v3_endpoint_components_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xb0, 0x05, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x41, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x13, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x1a, 0x8a, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, + 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x4c, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x25, 0x9a, + 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x91, 0x03, 0x0a, 0x0a, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x3a, 0x27, 0x9a, 0xc5, + 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x4c, 0x65, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x0b, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x0a, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x6c, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x64, 0x73, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x06, + 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, + 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, + 0x0a, 0x1c, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x19, 0x6c, 0x65, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x24, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x08, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, + 0x6d, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, + 0x69, 
0x74, 0x79, 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x3a, 0x30, + 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x42, 0x17, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData = file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescOnce.Do(func() { + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData +} + +var file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes = []interface{}{ + (*Endpoint)(nil), // 0: envoy.config.endpoint.v3.Endpoint + (*LbEndpoint)(nil), // 1: envoy.config.endpoint.v3.LbEndpoint + (*LedsClusterLocalityConfig)(nil), // 2: envoy.config.endpoint.v3.LedsClusterLocalityConfig + (*LocalityLbEndpoints)(nil), // 3: envoy.config.endpoint.v3.LocalityLbEndpoints + (*Endpoint_HealthCheckConfig)(nil), // 4: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig + (*Endpoint_AdditionalAddress)(nil), // 5: envoy.config.endpoint.v3.Endpoint.AdditionalAddress + (*LocalityLbEndpoints_LbEndpointList)(nil), // 6: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList + (*v3.Address)(nil), // 7: envoy.config.core.v3.Address + (v3.HealthStatus)(0), // 8: envoy.config.core.v3.HealthStatus + (*v3.Metadata)(nil), // 9: 
envoy.config.core.v3.Metadata + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*v3.ConfigSource)(nil), // 11: envoy.config.core.v3.ConfigSource + (*v3.Locality)(nil), // 12: envoy.config.core.v3.Locality +} +var file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = []int32{ + 7, // 0: envoy.config.endpoint.v3.Endpoint.address:type_name -> envoy.config.core.v3.Address + 4, // 1: envoy.config.endpoint.v3.Endpoint.health_check_config:type_name -> envoy.config.endpoint.v3.Endpoint.HealthCheckConfig + 5, // 2: envoy.config.endpoint.v3.Endpoint.additional_addresses:type_name -> envoy.config.endpoint.v3.Endpoint.AdditionalAddress + 0, // 3: envoy.config.endpoint.v3.LbEndpoint.endpoint:type_name -> envoy.config.endpoint.v3.Endpoint + 8, // 4: envoy.config.endpoint.v3.LbEndpoint.health_status:type_name -> envoy.config.core.v3.HealthStatus + 9, // 5: envoy.config.endpoint.v3.LbEndpoint.metadata:type_name -> envoy.config.core.v3.Metadata + 10, // 6: envoy.config.endpoint.v3.LbEndpoint.load_balancing_weight:type_name -> google.protobuf.UInt32Value + 11, // 7: envoy.config.endpoint.v3.LedsClusterLocalityConfig.leds_config:type_name -> envoy.config.core.v3.ConfigSource + 12, // 8: envoy.config.endpoint.v3.LocalityLbEndpoints.locality:type_name -> envoy.config.core.v3.Locality + 9, // 9: envoy.config.endpoint.v3.LocalityLbEndpoints.metadata:type_name -> envoy.config.core.v3.Metadata + 1, // 10: envoy.config.endpoint.v3.LocalityLbEndpoints.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 6, // 11: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancer_endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList + 2, // 12: envoy.config.endpoint.v3.LocalityLbEndpoints.leds_cluster_locality_config:type_name -> envoy.config.endpoint.v3.LedsClusterLocalityConfig + 10, // 13: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight:type_name -> google.protobuf.UInt32Value + 10, // 14: envoy.config.endpoint.v3.LocalityLbEndpoints.proximity:type_name -> google.protobuf.UInt32Value + 7, // 15: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig.address:type_name -> envoy.config.core.v3.Address + 7, // 16: envoy.config.endpoint.v3.Endpoint.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 1, // 17: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_endpoint_components_proto_init() } +func file_envoy_config_endpoint_v3_endpoint_components_proto_init() { + if File_envoy_config_endpoint_v3_endpoint_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LbEndpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LedsClusterLocalityConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbEndpoints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint_HealthCheckConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint_AdditionalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbEndpoints_LbEndpointList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*LbEndpoint_Endpoint)(nil), + (*LbEndpoint_EndpointName)(nil), + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*LocalityLbEndpoints_LoadBalancerEndpoints)(nil), + (*LocalityLbEndpoints_LedsClusterLocalityConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_endpoint_components_proto = out.File + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = nil + file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes = nil + file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go new file mode 100644 index 000000000..f11d8c508 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go @@ -0,0 +1,1322 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.HealthStatus(0) +) + +// Validate checks the field values on Endpoint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Endpoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EndpointMultiError, or nil +// if none found. +func (m *Endpoint) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHealthCheckConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthCheckConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Hostname + + for idx, item := range m.GetAdditionalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: 
fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EndpointMultiError(errors) + } + + return nil +} + +// EndpointMultiError is an error wrapping multiple validation errors returned +// by Endpoint.ValidateAll() if the designated constraints aren't met. +type EndpointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointMultiError) AllErrors() []error { return m } + +// EndpointValidationError is the validation error returned by +// Endpoint.Validate if the designated constraints aren't met. +type EndpointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointValidationError) ErrorName() string { return "EndpointValidationError" } + +// Error satisfies the builtin error interface +func (e EndpointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointValidationError{} + +// Validate checks the field values on LbEndpoint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LbEndpoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LbEndpoint with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LbEndpointMultiError, or +// nil if none found. 
+func (m *LbEndpoint) ValidateAll() error { + return m.validate(true) +} + +func (m *LbEndpoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for HealthStatus + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetLoadBalancingWeight(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := LbEndpointValidationError{ + field: "LoadBalancingWeight", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + switch v := m.HostIdentifier.(type) { + case *LbEndpoint_Endpoint: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpoint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpoint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LbEndpoint_EndpointName: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for EndpointName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return LbEndpointMultiError(errors) + } + + return nil +} + +// LbEndpointMultiError is an error wrapping multiple validation errors +// returned by LbEndpoint.ValidateAll() if the designated constraints aren't met. +type LbEndpointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LbEndpointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LbEndpointMultiError) AllErrors() []error { return m } + +// LbEndpointValidationError is the validation error returned by +// LbEndpoint.Validate if the designated constraints aren't met. 
+type LbEndpointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LbEndpointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LbEndpointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LbEndpointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LbEndpointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LbEndpointValidationError) ErrorName() string { return "LbEndpointValidationError" } + +// Error satisfies the builtin error interface +func (e LbEndpointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLbEndpoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LbEndpointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LbEndpointValidationError{} + +// Validate checks the field values on LedsClusterLocalityConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LedsClusterLocalityConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LedsClusterLocalityConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LedsClusterLocalityConfigMultiError, or nil if none found. +func (m *LedsClusterLocalityConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LedsClusterLocalityConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLedsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLedsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for LedsCollectionName + + if len(errors) > 0 { + return LedsClusterLocalityConfigMultiError(errors) + } + + return nil +} + +// LedsClusterLocalityConfigMultiError is an error wrapping multiple validation +// errors returned by LedsClusterLocalityConfig.ValidateAll() if the +// designated constraints aren't met. +type LedsClusterLocalityConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LedsClusterLocalityConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LedsClusterLocalityConfigMultiError) AllErrors() []error { return m } + +// LedsClusterLocalityConfigValidationError is the validation error returned by +// LedsClusterLocalityConfig.Validate if the designated constraints aren't met. +type LedsClusterLocalityConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LedsClusterLocalityConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LedsClusterLocalityConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LedsClusterLocalityConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LedsClusterLocalityConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LedsClusterLocalityConfigValidationError) ErrorName() string { + return "LedsClusterLocalityConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LedsClusterLocalityConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLedsClusterLocalityConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LedsClusterLocalityConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LedsClusterLocalityConfigValidationError{} + +// Validate checks the field values on LocalityLbEndpoints with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LocalityLbEndpoints) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbEndpoints with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalityLbEndpointsMultiError, or nil if none found. 
+func (m *LocalityLbEndpoints) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbEndpoints) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetLbEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetLoadBalancingWeight(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := LocalityLbEndpointsValidationError{ + field: "LoadBalancingWeight", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if m.GetPriority() > 128 { + err := LocalityLbEndpointsValidationError{ + field: "Priority", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetProximity()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + }) 
+ } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProximity()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.LbConfig.(type) { + case *LocalityLbEndpoints_LoadBalancerEndpoints: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLoadBalancerEndpoints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadBalancerEndpoints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LocalityLbEndpoints_LedsClusterLocalityConfig: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLedsClusterLocalityConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLedsClusterLocalityConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return LocalityLbEndpointsMultiError(errors) + } + + return nil +} + +// LocalityLbEndpointsMultiError is an error wrapping multiple validation +// errors returned by LocalityLbEndpoints.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbEndpointsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbEndpointsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m LocalityLbEndpointsMultiError) AllErrors() []error { return m } + +// LocalityLbEndpointsValidationError is the validation error returned by +// LocalityLbEndpoints.Validate if the designated constraints aren't met. +type LocalityLbEndpointsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbEndpointsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbEndpointsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbEndpointsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbEndpointsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbEndpointsValidationError) ErrorName() string { + return "LocalityLbEndpointsValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbEndpointsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbEndpoints.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbEndpointsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbEndpointsValidationError{} + +// Validate checks the field values on Endpoint_HealthCheckConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Endpoint_HealthCheckConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint_HealthCheckConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Endpoint_HealthCheckConfigMultiError, or nil if none found. 
+func (m *Endpoint_HealthCheckConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint_HealthCheckConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPortValue() > 65535 { + err := Endpoint_HealthCheckConfigValidationError{ + field: "PortValue", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Hostname + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableActiveHealthCheck + + if len(errors) > 0 { + return Endpoint_HealthCheckConfigMultiError(errors) + } + + return nil +} + +// Endpoint_HealthCheckConfigMultiError is an error wrapping multiple +// validation errors returned by Endpoint_HealthCheckConfig.ValidateAll() if +// the designated constraints aren't met. +type Endpoint_HealthCheckConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Endpoint_HealthCheckConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Endpoint_HealthCheckConfigMultiError) AllErrors() []error { return m } + +// Endpoint_HealthCheckConfigValidationError is the validation error returned +// by Endpoint_HealthCheckConfig.Validate if the designated constraints aren't met. +type Endpoint_HealthCheckConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Endpoint_HealthCheckConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Endpoint_HealthCheckConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Endpoint_HealthCheckConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Endpoint_HealthCheckConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Endpoint_HealthCheckConfigValidationError) ErrorName() string { + return "Endpoint_HealthCheckConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Endpoint_HealthCheckConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint_HealthCheckConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Endpoint_HealthCheckConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Endpoint_HealthCheckConfigValidationError{} + +// Validate checks the field values on Endpoint_AdditionalAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Endpoint_AdditionalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint_AdditionalAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Endpoint_AdditionalAddressMultiError, or nil if none found. +func (m *Endpoint_AdditionalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint_AdditionalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Endpoint_AdditionalAddressMultiError(errors) + } + + return nil +} + +// Endpoint_AdditionalAddressMultiError is an error wrapping multiple +// validation errors returned by Endpoint_AdditionalAddress.ValidateAll() if +// the designated constraints aren't met. +type Endpoint_AdditionalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Endpoint_AdditionalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Endpoint_AdditionalAddressMultiError) AllErrors() []error { return m } + +// Endpoint_AdditionalAddressValidationError is the validation error returned +// by Endpoint_AdditionalAddress.Validate if the designated constraints aren't met. +type Endpoint_AdditionalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Endpoint_AdditionalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Endpoint_AdditionalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Endpoint_AdditionalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Endpoint_AdditionalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Endpoint_AdditionalAddressValidationError) ErrorName() string { + return "Endpoint_AdditionalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e Endpoint_AdditionalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint_AdditionalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Endpoint_AdditionalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Endpoint_AdditionalAddressValidationError{} + +// Validate checks the field values on LocalityLbEndpoints_LbEndpointList with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *LocalityLbEndpoints_LbEndpointList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbEndpoints_LbEndpointList +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// LocalityLbEndpoints_LbEndpointListMultiError, or nil if none found. +func (m *LocalityLbEndpoints_LbEndpointList) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbEndpoints_LbEndpointList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetLbEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LocalityLbEndpoints_LbEndpointListMultiError(errors) + } + + return nil +} + +// LocalityLbEndpoints_LbEndpointListMultiError is an error wrapping multiple +// validation errors returned by +// LocalityLbEndpoints_LbEndpointList.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbEndpoints_LbEndpointListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LocalityLbEndpoints_LbEndpointListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbEndpoints_LbEndpointListMultiError) AllErrors() []error { return m } + +// LocalityLbEndpoints_LbEndpointListValidationError is the validation error +// returned by LocalityLbEndpoints_LbEndpointList.Validate if the designated +// constraints aren't met. +type LocalityLbEndpoints_LbEndpointListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbEndpoints_LbEndpointListValidationError) ErrorName() string { + return "LocalityLbEndpoints_LbEndpointListValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbEndpoints_LbEndpointListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbEndpoints_LbEndpointList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbEndpoints_LbEndpointListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbEndpoints_LbEndpointListValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go new file mode 100644 index 000000000..b86ffd0d1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go @@ -0,0 +1,896 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Endpoint_HealthCheckConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_HealthCheckConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_HealthCheckConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableActiveHealthCheck { + i-- + if m.DisableActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + } + if m.PortValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Endpoint_AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Endpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + } + if m.HealthCheckConfig != nil { + size, err := m.HealthCheckConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LbEndpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LbEndpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_EndpointName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.HealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HealthStatus)) + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_Endpoint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m 
*LbEndpoint_Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_Endpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Endpoint != nil { + size, err := m.Endpoint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LbEndpoint_EndpointName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_EndpointName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EndpointName) + copy(dAtA[i:], m.EndpointName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointName))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *LedsClusterLocalityConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LedsCollectionName) > 0 { + i -= len(m.LedsCollectionName) + copy(dAtA[i:], m.LedsCollectionName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LedsCollectionName))) + i-- + dAtA[i] = 0x12 + } + if m.LedsConfig != nil { + if vtmsg, ok := interface{}(m.LedsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LedsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LedsClusterLocalityConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LoadBalancerEndpoints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Proximity != nil { + size, err := (*wrapperspb.UInt32Value)(m.Proximity).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := 
len(dAtA) + if m.LoadBalancerEndpoints != nil { + size, err := m.LoadBalancerEndpoints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LedsClusterLocalityConfig != nil { + size, err := m.LedsClusterLocalityConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Endpoint_HealthCheckConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableActiveHealthCheck { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint_AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthCheckConfig != nil { + l = m.HealthCheckConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalAddresses) > 0 { + for _, e := range m.AdditionalAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HostIdentifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.HealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HealthStatus)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint_Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.Endpoint != nil { + l = m.Endpoint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LbEndpoint_EndpointName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EndpointName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsConfig != nil { + if size, ok := interface{}(m.LedsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LedsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LedsCollectionName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LbEndpointList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.Proximity != nil { + l = (*wrapperspb.UInt32Value)(m.Proximity).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadBalancerEndpoints != nil { + l = m.LoadBalancerEndpoints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsClusterLocalityConfig != nil { + l = m.LedsClusterLocalityConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go new file mode 100644 index 000000000..368eefe03 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go @@ -0,0 +1,331 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DropPercentage != nil { + if vtmsg, ok := interface{}(m.DropPercentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DropPercentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WeightedPriorityHealth { + i-- + if m.WeightedPriorityHealth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EndpointStaleAfter != nil { + size, err := (*durationpb.Duration)(m.EndpointStaleAfter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.OverprovisioningFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.DropOverloads) > 0 { + for iNdEx := len(m.DropOverloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DropOverloads[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NamedEndpoints) > 0 { + for k := range m.NamedEndpoints { + v := m.NamedEndpoints[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Policy != nil { + size, err := m.Policy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Endpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DropPercentage != nil { + if size, ok := interface{}(m.DropPercentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DropPercentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DropOverloads) > 0 { + for _, e := range m.DropOverloads { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverprovisioningFactor != nil { + l = (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointStaleAfter != nil { + l = (*durationpb.Duration)(m.EndpointStaleAfter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightedPriorityHealth { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Policy != nil { + l = m.Policy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NamedEndpoints) > 0 { + for k, v := range m.NamedEndpoints { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go new file mode 100644 index 000000000..07d96da49 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go @@ -0,0 +1,978 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// These are stats Envoy reports to the management server at a frequency defined by +// :ref:`LoadStatsResponse.load_reporting_interval`. +// Stats per upstream region/zone and optionally per subzone. +// [#next-free-field: 15] +type UpstreamLocalityStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of zone, region and optionally endpoint group these metrics were + // collected from. Zone and region names could be empty if unknown. + Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` + // The total number of requests successfully completed by the endpoints in the + // locality. + TotalSuccessfulRequests uint64 `protobuf:"varint,2,opt,name=total_successful_requests,json=totalSuccessfulRequests,proto3" json:"total_successful_requests,omitempty"` + // The total number of unfinished requests + TotalRequestsInProgress uint64 `protobuf:"varint,3,opt,name=total_requests_in_progress,json=totalRequestsInProgress,proto3" json:"total_requests_in_progress,omitempty"` + // The total number of requests that failed due to errors at the endpoint, + // aggregated over all endpoints in the locality. 
+ TotalErrorRequests uint64 `protobuf:"varint,4,opt,name=total_error_requests,json=totalErrorRequests,proto3" json:"total_error_requests,omitempty"` + // The total number of requests that were issued by this Envoy since + // the last report. This information is aggregated over all the + // upstream endpoints in the locality. + TotalIssuedRequests uint64 `protobuf:"varint,8,opt,name=total_issued_requests,json=totalIssuedRequests,proto3" json:"total_issued_requests,omitempty"` + // The total number of connections in an established state at the time of the + // report. This field is aggregated over all the upstream endpoints in the + // locality. + // In Envoy, this information may be based on “upstream_cx_active metric“. + // [#not-implemented-hide:] + TotalActiveConnections uint64 `protobuf:"varint,9,opt,name=total_active_connections,json=totalActiveConnections,proto3" json:"total_active_connections,omitempty"` + // The total number of connections opened since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on “upstream_cx_total“ metric + // compared to itself between start and end of an interval, i.e. + // “upstream_cx_total“(now) - “upstream_cx_total“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalNewConnections uint64 `protobuf:"varint,10,opt,name=total_new_connections,json=totalNewConnections,proto3" json:"total_new_connections,omitempty"` + // The total number of connection failures since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on “upstream_cx_connect_fail“ + // metric compared to itself between start and end of an interval, i.e. + // “upstream_cx_connect_fail“(now) - “upstream_cx_connect_fail“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalFailConnections uint64 `protobuf:"varint,11,opt,name=total_fail_connections,json=totalFailConnections,proto3" json:"total_fail_connections,omitempty"` + // CPU utilization stats for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + CpuUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,12,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + // Memory utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + MemUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,13,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` + // Blended application-defined utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + ApplicationUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,14,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` + // Named stats for multi-dimensional load balancing. + // These typically come from endpoint metrics reported via ORCA. + LoadMetricStats []*EndpointLoadMetricStats `protobuf:"bytes,5,rep,name=load_metric_stats,json=loadMetricStats,proto3" json:"load_metric_stats,omitempty"` + // Endpoint granularity stats information for this locality. This information + // is populated if the Server requests it by setting + // :ref:`LoadStatsResponse.report_endpoint_granularity`. 
+ UpstreamEndpointStats []*UpstreamEndpointStats `protobuf:"bytes,7,rep,name=upstream_endpoint_stats,json=upstreamEndpointStats,proto3" json:"upstream_endpoint_stats,omitempty"` + // [#not-implemented-hide:] The priority of the endpoint group these metrics + // were collected from. + Priority uint32 `protobuf:"varint,6,opt,name=priority,proto3" json:"priority,omitempty"` +} + +func (x *UpstreamLocalityStats) Reset() { + *x = UpstreamLocalityStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamLocalityStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamLocalityStats) ProtoMessage() {} + +func (x *UpstreamLocalityStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamLocalityStats.ProtoReflect.Descriptor instead. +func (*UpstreamLocalityStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{0} +} + +func (x *UpstreamLocalityStats) GetLocality() *v3.Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *UpstreamLocalityStats) GetTotalSuccessfulRequests() uint64 { + if x != nil { + return x.TotalSuccessfulRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalRequestsInProgress() uint64 { + if x != nil { + return x.TotalRequestsInProgress + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalErrorRequests() uint64 { + if x != nil { + return x.TotalErrorRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalIssuedRequests() uint64 { + if x != nil { + return x.TotalIssuedRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalActiveConnections() uint64 { + if x != nil { + return x.TotalActiveConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalNewConnections() uint64 { + if x != nil { + return x.TotalNewConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalFailConnections() uint64 { + if x != nil { + return x.TotalFailConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetCpuUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.CpuUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetMemUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.MemUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetApplicationUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.ApplicationUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetLoadMetricStats() []*EndpointLoadMetricStats { + if x != nil { + return x.LoadMetricStats + } + return nil +} + +func (x *UpstreamLocalityStats) GetUpstreamEndpointStats() []*UpstreamEndpointStats { + if x != nil { + return x.UpstreamEndpointStats + } + return nil +} + +func (x *UpstreamLocalityStats) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +// [#next-free-field: 8] +type UpstreamEndpointStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Upstream host address. 
+ Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Opaque and implementation dependent metadata of the + // endpoint. Envoy will pass this directly to the management server. + Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The total number of requests successfully completed by the endpoints in the + // locality. These include non-5xx responses for HTTP, where errors + // originate at the client and the endpoint responded successfully. For gRPC, + // the grpc-status values are those not covered by total_error_requests below. + TotalSuccessfulRequests uint64 `protobuf:"varint,2,opt,name=total_successful_requests,json=totalSuccessfulRequests,proto3" json:"total_successful_requests,omitempty"` + // The total number of unfinished requests for this endpoint. + TotalRequestsInProgress uint64 `protobuf:"varint,3,opt,name=total_requests_in_progress,json=totalRequestsInProgress,proto3" json:"total_requests_in_progress,omitempty"` + // The total number of requests that failed due to errors at the endpoint. + // For HTTP these are responses with 5xx status codes and for gRPC the + // grpc-status values: + // + // - DeadlineExceeded + // - Unimplemented + // - Internal + // - Unavailable + // - Unknown + // - DataLoss + TotalErrorRequests uint64 `protobuf:"varint,4,opt,name=total_error_requests,json=totalErrorRequests,proto3" json:"total_error_requests,omitempty"` + // The total number of requests that were issued to this endpoint + // since the last report. A single TCP connection, HTTP or gRPC + // request or stream is counted as one request. + TotalIssuedRequests uint64 `protobuf:"varint,7,opt,name=total_issued_requests,json=totalIssuedRequests,proto3" json:"total_issued_requests,omitempty"` + // Stats for multi-dimensional load balancing. + LoadMetricStats []*EndpointLoadMetricStats `protobuf:"bytes,5,rep,name=load_metric_stats,json=loadMetricStats,proto3" json:"load_metric_stats,omitempty"` +} + +func (x *UpstreamEndpointStats) Reset() { + *x = UpstreamEndpointStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamEndpointStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamEndpointStats) ProtoMessage() {} + +func (x *UpstreamEndpointStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamEndpointStats.ProtoReflect.Descriptor instead. 
+func (*UpstreamEndpointStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{1} +} + +func (x *UpstreamEndpointStats) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *UpstreamEndpointStats) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *UpstreamEndpointStats) GetTotalSuccessfulRequests() uint64 { + if x != nil { + return x.TotalSuccessfulRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalRequestsInProgress() uint64 { + if x != nil { + return x.TotalRequestsInProgress + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalErrorRequests() uint64 { + if x != nil { + return x.TotalErrorRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalIssuedRequests() uint64 { + if x != nil { + return x.TotalIssuedRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetLoadMetricStats() []*EndpointLoadMetricStats { + if x != nil { + return x.LoadMetricStats + } + return nil +} + +type EndpointLoadMetricStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the metric; may be empty. + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` + // Number of calls that finished and included this metric. + NumRequestsFinishedWithMetric uint64 `protobuf:"varint,2,opt,name=num_requests_finished_with_metric,json=numRequestsFinishedWithMetric,proto3" json:"num_requests_finished_with_metric,omitempty"` + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + TotalMetricValue float64 `protobuf:"fixed64,3,opt,name=total_metric_value,json=totalMetricValue,proto3" json:"total_metric_value,omitempty"` +} + +func (x *EndpointLoadMetricStats) Reset() { + *x = EndpointLoadMetricStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointLoadMetricStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointLoadMetricStats) ProtoMessage() {} + +func (x *EndpointLoadMetricStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointLoadMetricStats.ProtoReflect.Descriptor instead. +func (*EndpointLoadMetricStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{2} +} + +func (x *EndpointLoadMetricStats) GetMetricName() string { + if x != nil { + return x.MetricName + } + return "" +} + +func (x *EndpointLoadMetricStats) GetNumRequestsFinishedWithMetric() uint64 { + if x != nil { + return x.NumRequestsFinishedWithMetric + } + return 0 +} + +func (x *EndpointLoadMetricStats) GetTotalMetricValue() float64 { + if x != nil { + return x.TotalMetricValue + } + return 0 +} + +// Same as EndpointLoadMetricStats, except without the metric_name field. 
+type UnnamedEndpointLoadMetricStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of calls that finished and included this metric. + NumRequestsFinishedWithMetric uint64 `protobuf:"varint,1,opt,name=num_requests_finished_with_metric,json=numRequestsFinishedWithMetric,proto3" json:"num_requests_finished_with_metric,omitempty"` + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + TotalMetricValue float64 `protobuf:"fixed64,2,opt,name=total_metric_value,json=totalMetricValue,proto3" json:"total_metric_value,omitempty"` +} + +func (x *UnnamedEndpointLoadMetricStats) Reset() { + *x = UnnamedEndpointLoadMetricStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnnamedEndpointLoadMetricStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnnamedEndpointLoadMetricStats) ProtoMessage() {} + +func (x *UnnamedEndpointLoadMetricStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnnamedEndpointLoadMetricStats.ProtoReflect.Descriptor instead. +func (*UnnamedEndpointLoadMetricStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{3} +} + +func (x *UnnamedEndpointLoadMetricStats) GetNumRequestsFinishedWithMetric() uint64 { + if x != nil { + return x.NumRequestsFinishedWithMetric + } + return 0 +} + +func (x *UnnamedEndpointLoadMetricStats) GetTotalMetricValue() float64 { + if x != nil { + return x.TotalMetricValue + } + return 0 +} + +// Per cluster load stats. Envoy reports these stats a management server in a +// :ref:`LoadStatsRequest` +// Next ID: 7 +// [#next-free-field: 7] +type ClusterStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cluster. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The eds_cluster_config service_name of the cluster. + // It's possible that two clusters send the same service_name to EDS, + // in that case, the management server is supposed to do aggregation on the load reports. + ClusterServiceName string `protobuf:"bytes,6,opt,name=cluster_service_name,json=clusterServiceName,proto3" json:"cluster_service_name,omitempty"` + // Need at least one. + UpstreamLocalityStats []*UpstreamLocalityStats `protobuf:"bytes,2,rep,name=upstream_locality_stats,json=upstreamLocalityStats,proto3" json:"upstream_locality_stats,omitempty"` + // Cluster-level stats such as total_successful_requests may be computed by + // summing upstream_locality_stats. In addition, below there are additional + // cluster-wide stats. + // + // The total number of dropped requests. This covers requests + // deliberately dropped by the drop_overload policy and circuit breaking. 
+ TotalDroppedRequests uint64 `protobuf:"varint,3,opt,name=total_dropped_requests,json=totalDroppedRequests,proto3" json:"total_dropped_requests,omitempty"` + // Information about deliberately dropped requests for each category specified + // in the DropOverload policy. + DroppedRequests []*ClusterStats_DroppedRequests `protobuf:"bytes,5,rep,name=dropped_requests,json=droppedRequests,proto3" json:"dropped_requests,omitempty"` + // Period over which the actual load report occurred. This will be guaranteed to include every + // request reported. Due to system load and delays between the “LoadStatsRequest“ sent from Envoy + // and the “LoadStatsResponse“ message sent from the management server, this may be longer than + // the requested load reporting interval in the “LoadStatsResponse“. + LoadReportInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=load_report_interval,json=loadReportInterval,proto3" json:"load_report_interval,omitempty"` +} + +func (x *ClusterStats) Reset() { + *x = ClusterStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStats) ProtoMessage() {} + +func (x *ClusterStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStats.ProtoReflect.Descriptor instead. +func (*ClusterStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4} +} + +func (x *ClusterStats) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterStats) GetClusterServiceName() string { + if x != nil { + return x.ClusterServiceName + } + return "" +} + +func (x *ClusterStats) GetUpstreamLocalityStats() []*UpstreamLocalityStats { + if x != nil { + return x.UpstreamLocalityStats + } + return nil +} + +func (x *ClusterStats) GetTotalDroppedRequests() uint64 { + if x != nil { + return x.TotalDroppedRequests + } + return 0 +} + +func (x *ClusterStats) GetDroppedRequests() []*ClusterStats_DroppedRequests { + if x != nil { + return x.DroppedRequests + } + return nil +} + +func (x *ClusterStats) GetLoadReportInterval() *durationpb.Duration { + if x != nil { + return x.LoadReportInterval + } + return nil +} + +type ClusterStats_DroppedRequests struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the policy specifying the drop. + Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` + // Total number of deliberately dropped requests for the category. 
+ DroppedCount uint64 `protobuf:"varint,2,opt,name=dropped_count,json=droppedCount,proto3" json:"dropped_count,omitempty"` +} + +func (x *ClusterStats_DroppedRequests) Reset() { + *x = ClusterStats_DroppedRequests{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStats_DroppedRequests) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStats_DroppedRequests) ProtoMessage() {} + +func (x *ClusterStats_DroppedRequests) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStats_DroppedRequests.ProtoReflect.Descriptor instead. +func (*ClusterStats_DroppedRequests) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ClusterStats_DroppedRequests) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +func (x *ClusterStats_DroppedRequests) GetDroppedCount() uint64 { + if x != nil { + return x.DroppedCount + } + return 0 +} + +var File_envoy_config_endpoint_v3_load_report_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_load_report_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xc5, 0x08, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x19, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x18, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4e, 0x65, + 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x16, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x46, 0x61, 0x69, + 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x61, 0x0a, 0x0f, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 
0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x61, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, + 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x16, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf7, 0x03, + 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x19, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, + 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x2c, + 
0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x34, 0x9a, 0xc5, + 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x1e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x89, 0x05, + 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2a, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x71, 0x0a, 0x17, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 
0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x96, 0x01, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, + 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_load_report_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_load_report_proto_rawDescData = file_envoy_config_endpoint_v3_load_report_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_load_report_proto_rawDescOnce.Do(func() { + file_envoy_config_endpoint_v3_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_load_report_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_load_report_proto_rawDescData +} + +var 
file_envoy_config_endpoint_v3_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_envoy_config_endpoint_v3_load_report_proto_goTypes = []interface{}{ + (*UpstreamLocalityStats)(nil), // 0: envoy.config.endpoint.v3.UpstreamLocalityStats + (*UpstreamEndpointStats)(nil), // 1: envoy.config.endpoint.v3.UpstreamEndpointStats + (*EndpointLoadMetricStats)(nil), // 2: envoy.config.endpoint.v3.EndpointLoadMetricStats + (*UnnamedEndpointLoadMetricStats)(nil), // 3: envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + (*ClusterStats)(nil), // 4: envoy.config.endpoint.v3.ClusterStats + (*ClusterStats_DroppedRequests)(nil), // 5: envoy.config.endpoint.v3.ClusterStats.DroppedRequests + (*v3.Locality)(nil), // 6: envoy.config.core.v3.Locality + (*v3.Address)(nil), // 7: envoy.config.core.v3.Address + (*structpb.Struct)(nil), // 8: google.protobuf.Struct + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_envoy_config_endpoint_v3_load_report_proto_depIdxs = []int32{ + 6, // 0: envoy.config.endpoint.v3.UpstreamLocalityStats.locality:type_name -> envoy.config.core.v3.Locality + 3, // 1: envoy.config.endpoint.v3.UpstreamLocalityStats.cpu_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 2: envoy.config.endpoint.v3.UpstreamLocalityStats.mem_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 3: envoy.config.endpoint.v3.UpstreamLocalityStats.application_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 2, // 4: envoy.config.endpoint.v3.UpstreamLocalityStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 1, // 5: envoy.config.endpoint.v3.UpstreamLocalityStats.upstream_endpoint_stats:type_name -> envoy.config.endpoint.v3.UpstreamEndpointStats + 7, // 6: envoy.config.endpoint.v3.UpstreamEndpointStats.address:type_name -> envoy.config.core.v3.Address + 8, // 7: envoy.config.endpoint.v3.UpstreamEndpointStats.metadata:type_name -> google.protobuf.Struct + 2, // 8: envoy.config.endpoint.v3.UpstreamEndpointStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 0, // 9: envoy.config.endpoint.v3.ClusterStats.upstream_locality_stats:type_name -> envoy.config.endpoint.v3.UpstreamLocalityStats + 5, // 10: envoy.config.endpoint.v3.ClusterStats.dropped_requests:type_name -> envoy.config.endpoint.v3.ClusterStats.DroppedRequests + 9, // 11: envoy.config.endpoint.v3.ClusterStats.load_report_interval:type_name -> google.protobuf.Duration + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_load_report_proto_init() } +func file_envoy_config_endpoint_v3_load_report_proto_init() { + if File_envoy_config_endpoint_v3_load_report_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamLocalityStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamEndpointStats); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointLoadMetricStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnnamedEndpointLoadMetricStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStats_DroppedRequests); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_load_report_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_load_report_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_load_report_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_load_report_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_load_report_proto = out.File + file_envoy_config_endpoint_v3_load_report_proto_rawDesc = nil + file_envoy_config_endpoint_v3_load_report_proto_goTypes = nil + file_envoy_config_endpoint_v3_load_report_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go new file mode 100644 index 000000000..fd7a0c6c1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go @@ -0,0 +1,1094 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpstreamLocalityStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamLocalityStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamLocalityStats with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamLocalityStatsMultiError, or nil if none found. +func (m *UpstreamLocalityStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamLocalityStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TotalSuccessfulRequests + + // no validation rules for TotalRequestsInProgress + + // no validation rules for TotalErrorRequests + + // no validation rules for TotalIssuedRequests + + // no validation rules for TotalActiveConnections + + // no validation rules for TotalNewConnections + + // no validation rules for TotalFailConnections + + if all { + switch v := interface{}(m.GetCpuUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCpuUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMemUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApplicationUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ 
Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApplicationUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetLoadMetricStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetUpstreamEndpointStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Priority + + if len(errors) > 0 { + return UpstreamLocalityStatsMultiError(errors) + } + + return nil +} + +// UpstreamLocalityStatsMultiError is an error wrapping multiple validation +// errors returned by UpstreamLocalityStats.ValidateAll() if the designated +// constraints aren't met. +type UpstreamLocalityStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamLocalityStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamLocalityStatsMultiError) AllErrors() []error { return m } + +// UpstreamLocalityStatsValidationError is the validation error returned by +// UpstreamLocalityStats.Validate if the designated constraints aren't met. +type UpstreamLocalityStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UpstreamLocalityStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamLocalityStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamLocalityStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamLocalityStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamLocalityStatsValidationError) ErrorName() string { + return "UpstreamLocalityStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamLocalityStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamLocalityStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamLocalityStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamLocalityStatsValidationError{} + +// Validate checks the field values on UpstreamEndpointStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamEndpointStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamEndpointStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamEndpointStatsMultiError, or nil if none found. +func (m *UpstreamEndpointStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamEndpointStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + // no validation rules for TotalSuccessfulRequests + + // no validation rules for TotalRequestsInProgress + + // no validation rules for TotalErrorRequests + + // no validation rules for TotalIssuedRequests + + for idx, item := range m.GetLoadMetricStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return UpstreamEndpointStatsMultiError(errors) + } + + return nil +} + +// UpstreamEndpointStatsMultiError is an error wrapping multiple validation +// errors returned by UpstreamEndpointStats.ValidateAll() if the designated +// constraints aren't met. +type UpstreamEndpointStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamEndpointStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamEndpointStatsMultiError) AllErrors() []error { return m } + +// UpstreamEndpointStatsValidationError is the validation error returned by +// UpstreamEndpointStats.Validate if the designated constraints aren't met. +type UpstreamEndpointStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamEndpointStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamEndpointStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamEndpointStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamEndpointStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamEndpointStatsValidationError) ErrorName() string { + return "UpstreamEndpointStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamEndpointStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamEndpointStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamEndpointStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamEndpointStatsValidationError{} + +// Validate checks the field values on EndpointLoadMetricStats with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EndpointLoadMetricStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EndpointLoadMetricStats with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EndpointLoadMetricStatsMultiError, or nil if none found. +func (m *EndpointLoadMetricStats) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointLoadMetricStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MetricName + + // no validation rules for NumRequestsFinishedWithMetric + + // no validation rules for TotalMetricValue + + if len(errors) > 0 { + return EndpointLoadMetricStatsMultiError(errors) + } + + return nil +} + +// EndpointLoadMetricStatsMultiError is an error wrapping multiple validation +// errors returned by EndpointLoadMetricStats.ValidateAll() if the designated +// constraints aren't met. +type EndpointLoadMetricStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointLoadMetricStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointLoadMetricStatsMultiError) AllErrors() []error { return m } + +// EndpointLoadMetricStatsValidationError is the validation error returned by +// EndpointLoadMetricStats.Validate if the designated constraints aren't met. +type EndpointLoadMetricStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointLoadMetricStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointLoadMetricStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointLoadMetricStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointLoadMetricStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointLoadMetricStatsValidationError) ErrorName() string { + return "EndpointLoadMetricStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointLoadMetricStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointLoadMetricStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointLoadMetricStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointLoadMetricStatsValidationError{} + +// Validate checks the field values on UnnamedEndpointLoadMetricStats with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UnnamedEndpointLoadMetricStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UnnamedEndpointLoadMetricStats with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the result is a list of violation errors wrapped in +// UnnamedEndpointLoadMetricStatsMultiError, or nil if none found. +func (m *UnnamedEndpointLoadMetricStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UnnamedEndpointLoadMetricStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumRequestsFinishedWithMetric + + // no validation rules for TotalMetricValue + + if len(errors) > 0 { + return UnnamedEndpointLoadMetricStatsMultiError(errors) + } + + return nil +} + +// UnnamedEndpointLoadMetricStatsMultiError is an error wrapping multiple +// validation errors returned by UnnamedEndpointLoadMetricStats.ValidateAll() +// if the designated constraints aren't met. +type UnnamedEndpointLoadMetricStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnnamedEndpointLoadMetricStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnnamedEndpointLoadMetricStatsMultiError) AllErrors() []error { return m } + +// UnnamedEndpointLoadMetricStatsValidationError is the validation error +// returned by UnnamedEndpointLoadMetricStats.Validate if the designated +// constraints aren't met. +type UnnamedEndpointLoadMetricStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnnamedEndpointLoadMetricStatsValidationError) ErrorName() string { + return "UnnamedEndpointLoadMetricStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UnnamedEndpointLoadMetricStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnnamedEndpointLoadMetricStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnnamedEndpointLoadMetricStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnnamedEndpointLoadMetricStatsValidationError{} + +// Validate checks the field values on ClusterStats with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStats with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterStatsMultiError, or +// nil if none found. 
+func (m *ClusterStats) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := ClusterStatsValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ClusterServiceName + + if len(m.GetUpstreamLocalityStats()) < 1 { + err := ClusterStatsValidationError{ + field: "UpstreamLocalityStats", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetUpstreamLocalityStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TotalDroppedRequests + + for idx, item := range m.GetDroppedRequests() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLoadReportInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadReportInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterStatsMultiError(errors) + } + + return nil +} 
+ +// ClusterStatsMultiError is an error wrapping multiple validation errors +// returned by ClusterStats.ValidateAll() if the designated constraints aren't met. +type ClusterStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStatsMultiError) AllErrors() []error { return m } + +// ClusterStatsValidationError is the validation error returned by +// ClusterStats.Validate if the designated constraints aren't met. +type ClusterStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStatsValidationError) ErrorName() string { return "ClusterStatsValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStatsValidationError{} + +// Validate checks the field values on ClusterStats_DroppedRequests with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterStats_DroppedRequests) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStats_DroppedRequests with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterStats_DroppedRequestsMultiError, or nil if none found. +func (m *ClusterStats_DroppedRequests) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStats_DroppedRequests) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCategory()) < 1 { + err := ClusterStats_DroppedRequestsValidationError{ + field: "Category", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DroppedCount + + if len(errors) > 0 { + return ClusterStats_DroppedRequestsMultiError(errors) + } + + return nil +} + +// ClusterStats_DroppedRequestsMultiError is an error wrapping multiple +// validation errors returned by ClusterStats_DroppedRequests.ValidateAll() if +// the designated constraints aren't met. +type ClusterStats_DroppedRequestsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterStats_DroppedRequestsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStats_DroppedRequestsMultiError) AllErrors() []error { return m } + +// ClusterStats_DroppedRequestsValidationError is the validation error returned +// by ClusterStats_DroppedRequests.Validate if the designated constraints +// aren't met. +type ClusterStats_DroppedRequestsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStats_DroppedRequestsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStats_DroppedRequestsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStats_DroppedRequestsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStats_DroppedRequestsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStats_DroppedRequestsValidationError) ErrorName() string { + return "ClusterStats_DroppedRequestsValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterStats_DroppedRequestsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStats_DroppedRequests.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStats_DroppedRequestsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStats_DroppedRequestsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go new file mode 100644 index 000000000..31b280a34 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go @@ -0,0 +1,696 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamLocalityStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamLocalityStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamLocalityStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApplicationUtilization != nil { + size, err := m.ApplicationUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.MemUtilization != nil { + size, err := m.MemUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CpuUtilization != nil { + size, err := m.CpuUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TotalFailConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalFailConnections)) + i-- + dAtA[i] = 0x58 + } + if m.TotalNewConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalNewConnections)) + i-- + dAtA[i] = 0x50 + } + if m.TotalActiveConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalActiveConnections)) + i-- + dAtA[i] = 0x48 + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x40 + } + if len(m.UpstreamEndpointStats) > 0 { + for iNdEx := len(m.UpstreamEndpointStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamEndpointStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x30 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamEndpointStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamEndpointStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamEndpointStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x38 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x19 + } + if m.NumRequestsFinishedWithMetric != 0 { + i 
= protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x10 + } + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x11 + } + if m.NumRequestsFinishedWithMetric != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats_DroppedRequests) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats_DroppedRequests) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats_DroppedRequests) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DroppedCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterServiceName) > 0 { + i -= len(m.ClusterServiceName) + copy(dAtA[i:], m.ClusterServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterServiceName))) + i-- + dAtA[i] = 0x32 + } + if len(m.DroppedRequests) > 0 { + for iNdEx := len(m.DroppedRequests) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DroppedRequests[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.LoadReportInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TotalDroppedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalDroppedRequests)) + i-- + dAtA[i] = 0x18 + } + if len(m.UpstreamLocalityStats) > 0 { + for iNdEx := len(m.UpstreamLocalityStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamLocalityStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamLocalityStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.UpstreamEndpointStats) > 0 { + for _, e := range m.UpstreamEndpointStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + if m.TotalActiveConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalActiveConnections)) + } + if m.TotalNewConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalNewConnections)) + } + if m.TotalFailConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalFailConnections)) + } + if m.CpuUtilization != nil { + l = m.CpuUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemUtilization != nil { + l = m.MemUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationUtilization != nil { + l = m.ApplicationUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamEndpointStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if 
m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *UnnamedEndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats_DroppedRequests) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DroppedCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DroppedCount)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpstreamLocalityStats) > 0 { + for _, e := range m.UpstreamLocalityStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalDroppedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalDroppedRequests)) + } + if m.LoadReportInterval != nil { + l = (*durationpb.Duration)(m.LoadReportInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DroppedRequests) > 0 { + for _, e := range m.DroppedRequests { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClusterServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go new file mode 100644 index 000000000..1adc7d96f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +type ApiListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + ApiListener *anypb.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` +} + +func (x *ApiListener) Reset() { + *x = ApiListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_api_listener_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiListener) ProtoMessage() {} + +func (x *ApiListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_api_listener_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiListener.ProtoReflect.Descriptor instead. 
+func (*ApiListener) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP(), []int{0} +} + +func (x *ApiListener) GetApiListener() *anypb.Any { + if x != nil { + return x.ApiListener + } + return nil +} + +var File_envoy_config_listener_v3_api_listener_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_api_listener_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x70, 0x69, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0b, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0b, 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, + 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x41, 0x70, + 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_api_listener_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_api_listener_proto_rawDescData = file_envoy_config_listener_v3_api_listener_proto_rawDesc +) + +func file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_api_listener_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_api_listener_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_api_listener_proto_rawDescData) + }) + return file_envoy_config_listener_v3_api_listener_proto_rawDescData +} + +var file_envoy_config_listener_v3_api_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_listener_v3_api_listener_proto_goTypes = []interface{}{ + (*ApiListener)(nil), // 0: envoy.config.listener.v3.ApiListener + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_envoy_config_listener_v3_api_listener_proto_depIdxs = []int32{ + 1, // 0: envoy.config.listener.v3.ApiListener.api_listener:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_api_listener_proto_init() } +func file_envoy_config_listener_v3_api_listener_proto_init() { + if File_envoy_config_listener_v3_api_listener_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_api_listener_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_api_listener_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_api_listener_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_api_listener_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_api_listener_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_api_listener_proto = out.File + file_envoy_config_listener_v3_api_listener_proto_rawDesc = nil + file_envoy_config_listener_v3_api_listener_proto_goTypes = nil + file_envoy_config_listener_v3_api_listener_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go new file mode 100644 index 000000000..56954a35d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go @@ -0,0 +1,165 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ApiListener with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ApiListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiListener with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ApiListenerMultiError, or +// nil if none found. +func (m *ApiListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetApiListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ApiListenerMultiError(errors) + } + + return nil +} + +// ApiListenerMultiError is an error wrapping multiple validation errors +// returned by ApiListener.ValidateAll() if the designated constraints aren't met. +type ApiListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiListenerMultiError) AllErrors() []error { return m } + +// ApiListenerValidationError is the validation error returned by +// ApiListener.Validate if the designated constraints aren't met. +type ApiListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApiListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ApiListenerValidationError) ErrorName() string { return "ApiListenerValidationError" } + +// Error satisfies the builtin error interface +func (e ApiListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiListenerValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go new file mode 100644 index 000000000..0d24f32c6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApiListener != nil { + size, err := (*anypb.Any)(m.ApiListener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiListener != nil { + l = (*anypb.Any)(m.ApiListener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go new file mode 100644 index 000000000..c639e62b1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go @@ -0,0 +1,1593 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v31 "github.com/cncf/xds/go/xds/core/v3" + v32 "github.com/cncf/xds/go/xds/type/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v33 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Listener_DrainType int32 + +const ( + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + Listener_DEFAULT Listener_DrainType = 0 + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + Listener_MODIFY_ONLY Listener_DrainType = 1 +) + +// Enum value maps for Listener_DrainType. +var ( + Listener_DrainType_name = map[int32]string{ + 0: "DEFAULT", + 1: "MODIFY_ONLY", + } + Listener_DrainType_value = map[string]int32{ + "DEFAULT": 0, + "MODIFY_ONLY": 1, + } +) + +func (x Listener_DrainType) Enum() *Listener_DrainType { + p := new(Listener_DrainType) + *p = x + return p +} + +func (x Listener_DrainType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Listener_DrainType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_listener_v3_listener_proto_enumTypes[0].Descriptor() +} + +func (Listener_DrainType) Type() protoreflect.EnumType { + return &file_envoy_config_listener_v3_listener_proto_enumTypes[0] +} + +func (x Listener_DrainType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Listener_DrainType.Descriptor instead. +func (Listener_DrainType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 0} +} + +// The additional address the listener is listening on. +type AdditionalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the listener. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. 
+ SocketOptions *v3.SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *AdditionalAddress) Reset() { + *x = AdditionalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalAddress) ProtoMessage() {} + +func (x *AdditionalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalAddress.ProtoReflect.Descriptor instead. +func (*AdditionalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{0} +} + +func (x *AdditionalAddress) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *AdditionalAddress) GetSocketOptions() *v3.SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// Listener list collections. Entries are “Listener“ resources or references. +// [#not-implemented-hide:] +type ListenerCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*v31.CollectionEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *ListenerCollection) Reset() { + *x = ListenerCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerCollection) ProtoMessage() {} + +func (x *ListenerCollection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerCollection.ProtoReflect.Descriptor instead. +func (*ListenerCollection) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenerCollection) GetEntries() []*v31.CollectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +// [#next-free-field: 36] +type Listener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. 
+ // Required unless “api_listener“ or “listener_specifier“ is populated. + Address *v3.Address `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // The additional addresses the listener should listen on. The addresses must be unique across all + // listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, + // all addresses use the same protocol, and multiple internal addresses are not supported. + AdditionalAddresses []*AdditionalAddress `protobuf:"bytes,33,rep,name=additional_addresses,json=additionalAddresses,proto3" json:"additional_addresses,omitempty"` + // Optional prefix to use on listener stats. If empty, the stats will be rooted at + // “listener.
.“. If non-empty, stats will be rooted at + // “listener..“. + StatPrefix string `protobuf:"bytes,28,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + FilterChains []*FilterChain `protobuf:"bytes,3,rep,name=filter_chains,json=filterChains,proto3" json:"filter_chains,omitempty"` + // :ref:`Matcher API ` resolving the filter chain name from the + // network properties. This matcher is used as a replacement for the filter chain match condition + // :ref:`filter_chain_match + // `. If specified, all + // :ref:`filter_chains ` must have a + // non-empty and unique :ref:`name ` field + // and not specify :ref:`filter_chain_match + // ` field. + // + // .. note:: + // + // Once matched, each connection is permanently bound to its filter chain. + // If the matcher changes but the filter chain remains the same, the + // connections bound to the filter chain are not drained. If, however, the + // filter chain is removed or structurally modified, then the drain for its + // connections is initiated. + FilterChainMatcher *v32.Matcher `protobuf:"bytes,32,opt,name=filter_chain_matcher,json=filterChainMatcher,proto3" json:"filter_chain_matcher,omitempty"` + // If a connection is redirected using “iptables“, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + UseOriginalDst *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_original_dst,json=useOriginalDst,proto3" json:"use_original_dst,omitempty"` + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + DefaultFilterChain *FilterChain `protobuf:"bytes,25,opt,name=default_filter_chain,json=defaultFilterChain,proto3" json:"default_filter_chain,omitempty"` + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + // Listener metadata. + Metadata *v3.Metadata `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. + DeprecatedV1 *Listener_DeprecatedV1 `protobuf:"bytes,7,opt,name=deprecated_v1,json=deprecatedV1,proto3" json:"deprecated_v1,omitempty"` + // The type of draining to perform at a listener-wide level. 
+ DrainType Listener_DrainType `protobuf:"varint,8,opt,name=drain_type,json=drainType,proto3,enum=envoy.config.listener.v3.Listener_DrainType" json:"drain_type,omitempty"` + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // ` and no + // :ref:`quic_options ` is specified in :ref:`udp_listener_config `. + // QUIC listener filters can be specified when :ref:`quic_options + // ` is + // specified in :ref:`udp_listener_config `. + // They are processed sequentially right before connection creation. And like TCP Listener filters, they can be used to manipulate the connection metadata and socket. But the difference is that they can't be used to pause connection creation. + ListenerFilters []*ListenerFilter `protobuf:"bytes,9,rep,name=listener_filters,json=listenerFilters,proto3" json:"listener_filters,omitempty"` + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // “continue_on_listener_filters_timeout“ is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + ListenerFiltersTimeout *durationpb.Duration `protobuf:"bytes,15,opt,name=listener_filters_timeout,json=listenerFiltersTimeout,proto3" json:"listener_filters_timeout,omitempty"` + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + ContinueOnListenerFiltersTimeout bool `protobuf:"varint,17,opt,name=continue_on_listener_filters_timeout,json=continueOnListenerFiltersTimeout,proto3" json:"continue_on_listener_filters_timeout,omitempty"` + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // “iptables“ “TPROXY“ target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using “TPROXY“ cannot be distinguished from connections redirected using “TPROXY“ and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the “CAP_NET_ADMIN“ capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. 
+ Transparent *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=transparent,proto3" json:"transparent,omitempty"` + // Whether the listener should set the “IP_FREEBIND“ socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option “IP_FREEBIND“ is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + Freebind *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=freebind,proto3" json:"freebind,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. The socket options can be updated for a listener when + // :ref:`enable_reuse_port ` + // is “true“. Otherwise, if socket options change during a listener update the update will be rejected + // to make it clear that the options were not updated. + SocketOptions []*v3.SocketOption `protobuf:"bytes,13,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + TcpFastOpenQueueLength *wrapperspb.UInt32Value `protobuf:"bytes,12,opt,name=tcp_fast_open_queue_length,json=tcpFastOpenQueueLength,proto3" json:"tcp_fast_open_queue_length,omitempty"` + // Specifies the intended direction of the traffic relative to the local Envoy. + // This property is required on Windows for listeners using the original destination filter, + // see :ref:`Original Destination `. + TrafficDirection v3.TrafficDirection `protobuf:"varint,16,opt,name=traffic_direction,json=trafficDirection,proto3,enum=envoy.config.core.v3.TrafficDirection" json:"traffic_direction,omitempty"` + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies UDP + // listener specific configuration. + UdpListenerConfig *UdpListenerConfig `protobuf:"bytes,18,opt,name=udp_listener_config,json=udpListenerConfig,proto3" json:"udp_listener_config,omitempty"` + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. 
+ // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener *ApiListener `protobuf:"bytes,19,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + // + // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 + // by setting :ref:`use_original_dst ` in X + // and :ref:`bind_to_port ` to false in Y1 and Y2, + // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and + // enable the balance config in Y1 and Y2 to balance the connections among the workers. + ConnectionBalanceConfig *Listener_ConnectionBalanceConfig `protobuf:"bytes,20,opt,name=connection_balance_config,json=connectionBalanceConfig,proto3" json:"connection_balance_config,omitempty"` + // Deprecated. Use “enable_reuse_port“ instead. + // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. + ReusePort bool `protobuf:"varint,21,opt,name=reuse_port,json=reusePort,proto3" json:"reuse_port,omitempty"` + // When this flag is set to true, listeners set the “SO_REUSEPORT“ socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. The change of field will be rejected during an listener update when the + // runtime flag “envoy.reloadable_features.enable_update_listener_socket_options“ is enabled. + // Otherwise, the update of this field will be ignored quietly. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. + // + // - On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // - On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // - On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + EnableReusePort *wrapperspb.BoolValue `protobuf:"bytes,29,opt,name=enable_reuse_port,json=enableReusePort,proto3" json:"enable_reuse_port,omitempty"` + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ AccessLog []*v33.AccessLog `protobuf:"bytes,22,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The maximum length a tcp listener's pending connections queue can grow to. If no value is + // provided net.core.somaxconn will be used on Linux and 128 otherwise. + TcpBacklogSize *wrapperspb.UInt32Value `protobuf:"bytes,24,opt,name=tcp_backlog_size,json=tcpBacklogSize,proto3" json:"tcp_backlog_size,omitempty"` + // The maximum number of connections to accept from the kernel per socket + // event. Envoy may decide to close these connections after accepting them + // from the kernel e.g. due to load shedding, or other policies. + // If there are more than max_connections_to_accept_per_socket_event + // connections pending accept, connections over this threshold will be + // accepted in later event loop iterations. + // If no value is provided Envoy will accept all connections pending accept + // from the kernel. + MaxConnectionsToAcceptPerSocketEvent *wrapperspb.UInt32Value `protobuf:"bytes,34,opt,name=max_connections_to_accept_per_socket_event,json=maxConnectionsToAcceptPerSocketEvent,proto3" json:"max_connections_to_accept_per_socket_event,omitempty"` + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that set + // :ref:`use_original_dst ` + // to true. Default is true. + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` + // The exclusive listener type and the corresponding config. + // + // Types that are assignable to ListenerSpecifier: + // + // *Listener_InternalListener + ListenerSpecifier isListener_ListenerSpecifier `protobuf_oneof:"listener_specifier"` + // Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to establish + // MPTCP connections. Non-MPTCP clients will fall back to regular TCP. + EnableMptcp bool `protobuf:"varint,30,opt,name=enable_mptcp,json=enableMptcp,proto3" json:"enable_mptcp,omitempty"` + // Whether the listener should limit connections based upon the value of + // :ref:`global_downstream_max_connections `. + IgnoreGlobalConnLimit bool `protobuf:"varint,31,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"` + // Whether the listener bypasses configured overload manager actions. + BypassOverloadManager bool `protobuf:"varint,35,opt,name=bypass_overload_manager,json=bypassOverloadManager,proto3" json:"bypass_overload_manager,omitempty"` +} + +func (x *Listener) Reset() { + *x = Listener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener) ProtoMessage() {} + +func (x *Listener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener.ProtoReflect.Descriptor instead. 
+func (*Listener) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2} +} + +func (x *Listener) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Listener) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Listener) GetAdditionalAddresses() []*AdditionalAddress { + if x != nil { + return x.AdditionalAddresses + } + return nil +} + +func (x *Listener) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (x *Listener) GetFilterChains() []*FilterChain { + if x != nil { + return x.FilterChains + } + return nil +} + +func (x *Listener) GetFilterChainMatcher() *v32.Matcher { + if x != nil { + return x.FilterChainMatcher + } + return nil +} + +func (x *Listener) GetUseOriginalDst() *wrapperspb.BoolValue { + if x != nil { + return x.UseOriginalDst + } + return nil +} + +func (x *Listener) GetDefaultFilterChain() *FilterChain { + if x != nil { + return x.DefaultFilterChain + } + return nil +} + +func (x *Listener) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerConnectionBufferLimitBytes + } + return nil +} + +func (x *Listener) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. +func (x *Listener) GetDeprecatedV1() *Listener_DeprecatedV1 { + if x != nil { + return x.DeprecatedV1 + } + return nil +} + +func (x *Listener) GetDrainType() Listener_DrainType { + if x != nil { + return x.DrainType + } + return Listener_DEFAULT +} + +func (x *Listener) GetListenerFilters() []*ListenerFilter { + if x != nil { + return x.ListenerFilters + } + return nil +} + +func (x *Listener) GetListenerFiltersTimeout() *durationpb.Duration { + if x != nil { + return x.ListenerFiltersTimeout + } + return nil +} + +func (x *Listener) GetContinueOnListenerFiltersTimeout() bool { + if x != nil { + return x.ContinueOnListenerFiltersTimeout + } + return false +} + +func (x *Listener) GetTransparent() *wrapperspb.BoolValue { + if x != nil { + return x.Transparent + } + return nil +} + +func (x *Listener) GetFreebind() *wrapperspb.BoolValue { + if x != nil { + return x.Freebind + } + return nil +} + +func (x *Listener) GetSocketOptions() []*v3.SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *Listener) GetTcpFastOpenQueueLength() *wrapperspb.UInt32Value { + if x != nil { + return x.TcpFastOpenQueueLength + } + return nil +} + +func (x *Listener) GetTrafficDirection() v3.TrafficDirection { + if x != nil { + return x.TrafficDirection + } + return v3.TrafficDirection(0) +} + +func (x *Listener) GetUdpListenerConfig() *UdpListenerConfig { + if x != nil { + return x.UdpListenerConfig + } + return nil +} + +func (x *Listener) GetApiListener() *ApiListener { + if x != nil { + return x.ApiListener + } + return nil +} + +func (x *Listener) GetConnectionBalanceConfig() *Listener_ConnectionBalanceConfig { + if x != nil { + return x.ConnectionBalanceConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. 
+func (x *Listener) GetReusePort() bool { + if x != nil { + return x.ReusePort + } + return false +} + +func (x *Listener) GetEnableReusePort() *wrapperspb.BoolValue { + if x != nil { + return x.EnableReusePort + } + return nil +} + +func (x *Listener) GetAccessLog() []*v33.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +func (x *Listener) GetTcpBacklogSize() *wrapperspb.UInt32Value { + if x != nil { + return x.TcpBacklogSize + } + return nil +} + +func (x *Listener) GetMaxConnectionsToAcceptPerSocketEvent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnectionsToAcceptPerSocketEvent + } + return nil +} + +func (x *Listener) GetBindToPort() *wrapperspb.BoolValue { + if x != nil { + return x.BindToPort + } + return nil +} + +func (m *Listener) GetListenerSpecifier() isListener_ListenerSpecifier { + if m != nil { + return m.ListenerSpecifier + } + return nil +} + +func (x *Listener) GetInternalListener() *Listener_InternalListenerConfig { + if x, ok := x.GetListenerSpecifier().(*Listener_InternalListener); ok { + return x.InternalListener + } + return nil +} + +func (x *Listener) GetEnableMptcp() bool { + if x != nil { + return x.EnableMptcp + } + return false +} + +func (x *Listener) GetIgnoreGlobalConnLimit() bool { + if x != nil { + return x.IgnoreGlobalConnLimit + } + return false +} + +func (x *Listener) GetBypassOverloadManager() bool { + if x != nil { + return x.BypassOverloadManager + } + return false +} + +type isListener_ListenerSpecifier interface { + isListener_ListenerSpecifier() +} + +type Listener_InternalListener struct { + // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the + // :ref:`envoy cluster ` to create a user space connection to. + // The internal listener acts as a TCP listener. It supports listener filters and network filter chains. + // Upstream clusters refer to the internal listeners by their :ref:`name + // `. :ref:`Address + // ` must not be set on the internal listeners. + // + // There are some limitations that are derived from the implementation. The known limitations include: + // + // - :ref:`ConnectionBalanceConfig ` is not + // allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. + // - :ref:`tcp_backlog_size ` + // - :ref:`freebind ` + // - :ref:`transparent ` + InternalListener *Listener_InternalListenerConfig `protobuf:"bytes,27,opt,name=internal_listener,json=internalListener,proto3,oneof"` +} + +func (*Listener_InternalListener) isListener_ListenerSpecifier() {} + +// A placeholder proto so that users can explicitly configure the standard +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListenerManager) Reset() { + *x = ListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerManager) ProtoMessage() {} + +func (x *ListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerManager.ProtoReflect.Descriptor instead. +func (*ListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{3} +} + +// A placeholder proto so that users can explicitly configure the standard +// Validation Listener Manager via the bootstrap's :ref:`listener_manager `. +// [#not-implemented-hide:] +type ValidationListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidationListenerManager) Reset() { + *x = ValidationListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidationListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidationListenerManager) ProtoMessage() {} + +func (x *ValidationListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidationListenerManager.ProtoReflect.Descriptor instead. +func (*ValidationListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{4} +} + +// A placeholder proto so that users can explicitly configure the API +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ApiListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApiListenerManager) Reset() { + *x = ApiListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiListenerManager) ProtoMessage() {} + +func (x *ApiListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiListenerManager.ProtoReflect.Descriptor instead. +func (*ApiListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{5} +} + +// [#not-implemented-hide:] +type Listener_DeprecatedV1 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated. Use :ref:`Listener.bind_to_port + // ` + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` +} + +func (x *Listener_DeprecatedV1) Reset() { + *x = Listener_DeprecatedV1{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_DeprecatedV1) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_DeprecatedV1) ProtoMessage() {} + +func (x *Listener_DeprecatedV1) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_DeprecatedV1.ProtoReflect.Descriptor instead. +func (*Listener_DeprecatedV1) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Listener_DeprecatedV1) GetBindToPort() *wrapperspb.BoolValue { + if x != nil { + return x.BindToPort + } + return nil +} + +// Configuration for listener connection balancing. 
+type Listener_ConnectionBalanceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to BalanceType: + // + // *Listener_ConnectionBalanceConfig_ExactBalance_ + // *Listener_ConnectionBalanceConfig_ExtendBalance + BalanceType isListener_ConnectionBalanceConfig_BalanceType `protobuf_oneof:"balance_type"` +} + +func (x *Listener_ConnectionBalanceConfig) Reset() { + *x = Listener_ConnectionBalanceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_ConnectionBalanceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_ConnectionBalanceConfig) ProtoMessage() {} + +func (x *Listener_ConnectionBalanceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_ConnectionBalanceConfig.ProtoReflect.Descriptor instead. +func (*Listener_ConnectionBalanceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 1} +} + +func (m *Listener_ConnectionBalanceConfig) GetBalanceType() isListener_ConnectionBalanceConfig_BalanceType { + if m != nil { + return m.BalanceType + } + return nil +} + +func (x *Listener_ConnectionBalanceConfig) GetExactBalance() *Listener_ConnectionBalanceConfig_ExactBalance { + if x, ok := x.GetBalanceType().(*Listener_ConnectionBalanceConfig_ExactBalance_); ok { + return x.ExactBalance + } + return nil +} + +func (x *Listener_ConnectionBalanceConfig) GetExtendBalance() *v3.TypedExtensionConfig { + if x, ok := x.GetBalanceType().(*Listener_ConnectionBalanceConfig_ExtendBalance); ok { + return x.ExtendBalance + } + return nil +} + +type isListener_ConnectionBalanceConfig_BalanceType interface { + isListener_ConnectionBalanceConfig_BalanceType() +} + +type Listener_ConnectionBalanceConfig_ExactBalance_ struct { + // If specified, the listener will use the exact connection balancer. + ExactBalance *Listener_ConnectionBalanceConfig_ExactBalance `protobuf:"bytes,1,opt,name=exact_balance,json=exactBalance,proto3,oneof"` +} + +type Listener_ConnectionBalanceConfig_ExtendBalance struct { + // The listener will use the connection balancer according to “type_url“. If “type_url“ is invalid, + // Envoy will not attempt to balance active connections between worker threads. + // [#extension-category: envoy.network.connection_balance] + ExtendBalance *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=extend_balance,json=extendBalance,proto3,oneof"` +} + +func (*Listener_ConnectionBalanceConfig_ExactBalance_) isListener_ConnectionBalanceConfig_BalanceType() { +} + +func (*Listener_ConnectionBalanceConfig_ExtendBalance) isListener_ConnectionBalanceConfig_BalanceType() { +} + +// Configuration for envoy internal listener. All the future internal listener features should be added here. 
+type Listener_InternalListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Listener_InternalListenerConfig) Reset() { + *x = Listener_InternalListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_InternalListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_InternalListenerConfig) ProtoMessage() {} + +func (x *Listener_InternalListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_InternalListenerConfig.ProtoReflect.Descriptor instead. +func (*Listener_InternalListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 2} +} + +// A connection balancer implementation that does exact balancing. This means that a lock is +// held during balancing so that connection counts are nearly exactly balanced between worker +// threads. This is "nearly" exact in the sense that a connection might close in parallel thus +// making the counts incorrect, but this should be rectified on the next accept. This balancer +// sacrifices accept throughput for accuracy and should be used when there are a small number of +// connections that rarely cycle (e.g., service mesh gRPC egress). +type Listener_ConnectionBalanceConfig_ExactBalance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) Reset() { + *x = Listener_ConnectionBalanceConfig_ExactBalance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_ConnectionBalanceConfig_ExactBalance) ProtoMessage() {} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_ConnectionBalanceConfig_ExactBalance.ProtoReflect.Descriptor instead. 
+func (*Listener_ConnectionBalanceConfig_ExactBalance) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 1, 0} +} + +var File_envoy_config_listener_v3_listener_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_listener_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x70, + 0x69, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x75, + 0x64, 0x70, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 
0x6f, + 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, + 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0xbe, 0x18, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x14, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x4a, 0x0a, + 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, + 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x4f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x14, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x6f, 0x0a, 0x21, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, + 0x2a, 0x02, 0x08, 0x01, 0x52, 0x1d, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x61, 0x0a, 0x0d, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x31, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x56, 0x31, 0x12, 0x4b, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x53, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x18, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x24, 0x63, 0x6f, 0x6e, + 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x65, 0x4f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, + 0x69, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, + 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x1a, 0x74, 0x63, + 0x70, 0x5f, 0x66, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x74, 0x63, + 0x70, 0x46, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x13, 0x75, 0x64, 0x70, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x11, 0x75, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x76, 0x0a, 0x19, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x17, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x72, 0x65, 0x75, 0x73, + 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x75, 0x73, 0x65, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, + 0x65, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 
0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x43, 0x0a, 0x0a, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x12, 0x46, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x42, 0x61, + 0x63, 0x6b, 0x6c, 0x6f, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x7f, 0x0a, 0x2a, 0x6d, 0x61, 0x78, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x5f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x2a, 0x02, 0x20, 0x00, 0x52, 0x24, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x50, 0x65, 0x72, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, + 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, + 0x6e, 0x64, 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x68, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x70, 0x74, + 0x63, 0x70, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x4d, 0x70, 0x74, 0x63, 0x70, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, + 0x0a, 0x17, 0x62, 
0x79, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x23, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x62, 0x79, 0x70, 0x61, 0x73, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x1a, 0x77, 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, + 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, + 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x1a, + 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x0d, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x13, 0x0a, 0x0c, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x18, + 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x22, 0x29, 0x0a, 0x09, 0x44, 0x72, 0x61, 0x69, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, + 0x17, 0x10, 0x18, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_listener_v3_listener_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_listener_proto_rawDescData = file_envoy_config_listener_v3_listener_proto_rawDesc +) + +func file_envoy_config_listener_v3_listener_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_listener_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_listener_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_listener_proto_rawDescData) + }) + return file_envoy_config_listener_v3_listener_proto_rawDescData +} + +var file_envoy_config_listener_v3_listener_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_listener_v3_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_envoy_config_listener_v3_listener_proto_goTypes = []interface{}{ + (Listener_DrainType)(0), // 0: envoy.config.listener.v3.Listener.DrainType + (*AdditionalAddress)(nil), // 1: envoy.config.listener.v3.AdditionalAddress + (*ListenerCollection)(nil), // 2: envoy.config.listener.v3.ListenerCollection + (*Listener)(nil), // 3: envoy.config.listener.v3.Listener + (*ListenerManager)(nil), // 4: envoy.config.listener.v3.ListenerManager + (*ValidationListenerManager)(nil), // 5: envoy.config.listener.v3.ValidationListenerManager + (*ApiListenerManager)(nil), // 6: envoy.config.listener.v3.ApiListenerManager + (*Listener_DeprecatedV1)(nil), // 7: envoy.config.listener.v3.Listener.DeprecatedV1 + (*Listener_ConnectionBalanceConfig)(nil), // 8: 
envoy.config.listener.v3.Listener.ConnectionBalanceConfig + (*Listener_InternalListenerConfig)(nil), // 9: envoy.config.listener.v3.Listener.InternalListenerConfig + (*Listener_ConnectionBalanceConfig_ExactBalance)(nil), // 10: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + (*v3.Address)(nil), // 11: envoy.config.core.v3.Address + (*v3.SocketOptionsOverride)(nil), // 12: envoy.config.core.v3.SocketOptionsOverride + (*v31.CollectionEntry)(nil), // 13: xds.core.v3.CollectionEntry + (*FilterChain)(nil), // 14: envoy.config.listener.v3.FilterChain + (*v32.Matcher)(nil), // 15: xds.type.matcher.v3.Matcher + (*wrapperspb.BoolValue)(nil), // 16: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*v3.Metadata)(nil), // 18: envoy.config.core.v3.Metadata + (*ListenerFilter)(nil), // 19: envoy.config.listener.v3.ListenerFilter + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*v3.SocketOption)(nil), // 21: envoy.config.core.v3.SocketOption + (v3.TrafficDirection)(0), // 22: envoy.config.core.v3.TrafficDirection + (*UdpListenerConfig)(nil), // 23: envoy.config.listener.v3.UdpListenerConfig + (*ApiListener)(nil), // 24: envoy.config.listener.v3.ApiListener + (*v33.AccessLog)(nil), // 25: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 26: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_listener_v3_listener_proto_depIdxs = []int32{ + 11, // 0: envoy.config.listener.v3.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 12, // 1: envoy.config.listener.v3.AdditionalAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 13, // 2: envoy.config.listener.v3.ListenerCollection.entries:type_name -> xds.core.v3.CollectionEntry + 11, // 3: envoy.config.listener.v3.Listener.address:type_name -> envoy.config.core.v3.Address + 1, // 4: envoy.config.listener.v3.Listener.additional_addresses:type_name -> envoy.config.listener.v3.AdditionalAddress + 14, // 5: envoy.config.listener.v3.Listener.filter_chains:type_name -> envoy.config.listener.v3.FilterChain + 15, // 6: envoy.config.listener.v3.Listener.filter_chain_matcher:type_name -> xds.type.matcher.v3.Matcher + 16, // 7: envoy.config.listener.v3.Listener.use_original_dst:type_name -> google.protobuf.BoolValue + 14, // 8: envoy.config.listener.v3.Listener.default_filter_chain:type_name -> envoy.config.listener.v3.FilterChain + 17, // 9: envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 18, // 10: envoy.config.listener.v3.Listener.metadata:type_name -> envoy.config.core.v3.Metadata + 7, // 11: envoy.config.listener.v3.Listener.deprecated_v1:type_name -> envoy.config.listener.v3.Listener.DeprecatedV1 + 0, // 12: envoy.config.listener.v3.Listener.drain_type:type_name -> envoy.config.listener.v3.Listener.DrainType + 19, // 13: envoy.config.listener.v3.Listener.listener_filters:type_name -> envoy.config.listener.v3.ListenerFilter + 20, // 14: envoy.config.listener.v3.Listener.listener_filters_timeout:type_name -> google.protobuf.Duration + 16, // 15: envoy.config.listener.v3.Listener.transparent:type_name -> google.protobuf.BoolValue + 16, // 16: envoy.config.listener.v3.Listener.freebind:type_name -> google.protobuf.BoolValue + 21, // 17: envoy.config.listener.v3.Listener.socket_options:type_name -> envoy.config.core.v3.SocketOption + 17, // 18: envoy.config.listener.v3.Listener.tcp_fast_open_queue_length:type_name -> 
google.protobuf.UInt32Value + 22, // 19: envoy.config.listener.v3.Listener.traffic_direction:type_name -> envoy.config.core.v3.TrafficDirection + 23, // 20: envoy.config.listener.v3.Listener.udp_listener_config:type_name -> envoy.config.listener.v3.UdpListenerConfig + 24, // 21: envoy.config.listener.v3.Listener.api_listener:type_name -> envoy.config.listener.v3.ApiListener + 8, // 22: envoy.config.listener.v3.Listener.connection_balance_config:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig + 16, // 23: envoy.config.listener.v3.Listener.enable_reuse_port:type_name -> google.protobuf.BoolValue + 25, // 24: envoy.config.listener.v3.Listener.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 17, // 25: envoy.config.listener.v3.Listener.tcp_backlog_size:type_name -> google.protobuf.UInt32Value + 17, // 26: envoy.config.listener.v3.Listener.max_connections_to_accept_per_socket_event:type_name -> google.protobuf.UInt32Value + 16, // 27: envoy.config.listener.v3.Listener.bind_to_port:type_name -> google.protobuf.BoolValue + 9, // 28: envoy.config.listener.v3.Listener.internal_listener:type_name -> envoy.config.listener.v3.Listener.InternalListenerConfig + 16, // 29: envoy.config.listener.v3.Listener.DeprecatedV1.bind_to_port:type_name -> google.protobuf.BoolValue + 10, // 30: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.exact_balance:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + 26, // 31: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.extend_balance:type_name -> envoy.config.core.v3.TypedExtensionConfig + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_listener_proto_init() } +func file_envoy_config_listener_v3_listener_proto_init() { + if File_envoy_config_listener_v3_listener_proto != nil { + return + } + file_envoy_config_listener_v3_api_listener_proto_init() + file_envoy_config_listener_v3_listener_components_proto_init() + file_envoy_config_listener_v3_udp_listener_config_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_listener_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ValidationListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_DeprecatedV1); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_ConnectionBalanceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_InternalListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_ConnectionBalanceConfig_ExactBalance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Listener_InternalListener)(nil), + } + file_envoy_config_listener_v3_listener_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*Listener_ConnectionBalanceConfig_ExactBalance_)(nil), + (*Listener_ConnectionBalanceConfig_ExtendBalance)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_listener_proto_rawDesc, + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_listener_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_listener_proto_depIdxs, + EnumInfos: file_envoy_config_listener_v3_listener_proto_enumTypes, + MessageInfos: file_envoy_config_listener_v3_listener_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_listener_proto = out.File + file_envoy_config_listener_v3_listener_proto_rawDesc = nil + file_envoy_config_listener_v3_listener_proto_goTypes = nil + file_envoy_config_listener_v3_listener_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go new file mode 100644 index 000000000..9fd429bae --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go @@ -0,0 +1,2030 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.TrafficDirection(0) +) + +// Validate checks the field values on AdditionalAddress with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AdditionalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AdditionalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AdditionalAddressMultiError, or nil if none found. +func (m *AdditionalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *AdditionalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return AdditionalAddressMultiError(errors) + } + + return nil +} + +// AdditionalAddressMultiError is an error wrapping multiple validation errors +// returned by AdditionalAddress.ValidateAll() if the designated constraints +// aren't met. +type AdditionalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m AdditionalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AdditionalAddressMultiError) AllErrors() []error { return m } + +// AdditionalAddressValidationError is the validation error returned by +// AdditionalAddress.Validate if the designated constraints aren't met. +type AdditionalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdditionalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdditionalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AdditionalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdditionalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdditionalAddressValidationError) ErrorName() string { + return "AdditionalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e AdditionalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdditionalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdditionalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdditionalAddressValidationError{} + +// Validate checks the field values on ListenerCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListenerCollection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenerCollectionMultiError, or nil if none found. 
+func (m *ListenerCollection) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerCollection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetEntries() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerCollectionMultiError(errors) + } + + return nil +} + +// ListenerCollectionMultiError is an error wrapping multiple validation errors +// returned by ListenerCollection.ValidateAll() if the designated constraints +// aren't met. +type ListenerCollectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerCollectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerCollectionMultiError) AllErrors() []error { return m } + +// ListenerCollectionValidationError is the validation error returned by +// ListenerCollection.Validate if the designated constraints aren't met. +type ListenerCollectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerCollectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerCollectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerCollectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerCollectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerCollectionValidationError) ErrorName() string { + return "ListenerCollectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerCollectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerCollection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerCollectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerCollectionValidationError{} + +// Validate checks the field values on Listener with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Listener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenerMultiError, or nil +// if none found. +func (m *Listener) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAdditionalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for StatPrefix + + for idx, item := range m.GetFilterChains() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetFilterChainMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != 
nil { + errors = append(errors, ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterChainMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseOriginalDst()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseOriginalDst()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDefaultFilterChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultFilterChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerConnectionBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerConnectionBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDeprecatedV1()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDeprecatedV1()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DrainType + + for idx, item := range m.GetListenerFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetListenerFiltersTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerFiltersTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ContinueOnListenerFiltersTimeout + + if all { + switch v := interface{}(m.GetTransparent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransparent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFreebind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTcpFastOpenQueueLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpFastOpenQueueLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrafficDirection + + if all { + switch v := interface{}(m.GetUdpListenerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUdpListenerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApiListener()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionBalanceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionBalanceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ReusePort + + if all { + switch v := interface{}(m.GetEnableReusePort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableReusePort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTcpBacklogSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpBacklogSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxConnectionsToAcceptPerSocketEvent(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := ListenerValidationError{ + field: "MaxConnectionsToAcceptPerSocketEvent", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetBindToPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBindToPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableMptcp + + // no validation rules for IgnoreGlobalConnLimit + + // no validation rules for BypassOverloadManager + + switch v := m.ListenerSpecifier.(type) { + case *Listener_InternalListener: + if v == nil { + err := ListenerValidationError{ + field: "ListenerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInternalListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ListenerMultiError(errors) + } + + return nil +} + +// ListenerMultiError is an error wrapping multiple validation errors returned +// by Listener.ValidateAll() if the designated constraints aren't met. +type ListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ListenerMultiError) AllErrors() []error { return m } + +// ListenerValidationError is the validation error returned by +// Listener.Validate if the designated constraints aren't met. +type ListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerValidationError) ErrorName() string { return "ListenerValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerValidationError{} + +// Validate checks the field values on ListenerManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenerManagerMultiError, or nil if none found. +func (m *ListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ListenerManagerMultiError(errors) + } + + return nil +} + +// ListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerManagerMultiError) AllErrors() []error { return m } + +// ListenerManagerValidationError is the validation error returned by +// ListenerManager.Validate if the designated constraints aren't met. +type ListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerManagerValidationError) ErrorName() string { return "ListenerManagerValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerManagerValidationError{} + +// Validate checks the field values on ValidationListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ValidationListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValidationListenerManager with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ValidationListenerManagerMultiError, or nil if none found. +func (m *ValidationListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ValidationListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ValidationListenerManagerMultiError(errors) + } + + return nil +} + +// ValidationListenerManagerMultiError is an error wrapping multiple validation +// errors returned by ValidationListenerManager.ValidateAll() if the +// designated constraints aren't met. +type ValidationListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValidationListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValidationListenerManagerMultiError) AllErrors() []error { return m } + +// ValidationListenerManagerValidationError is the validation error returned by +// ValidationListenerManager.Validate if the designated constraints aren't met. +type ValidationListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValidationListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValidationListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValidationListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValidationListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ValidationListenerManagerValidationError) ErrorName() string { + return "ValidationListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ValidationListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValidationListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValidationListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValidationListenerManagerValidationError{} + +// Validate checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ApiListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApiListenerManagerMultiError, or nil if none found. +func (m *ApiListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ApiListenerManagerMultiError(errors) + } + + return nil +} + +// ApiListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ApiListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ApiListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiListenerManagerMultiError) AllErrors() []error { return m } + +// ApiListenerManagerValidationError is the validation error returned by +// ApiListenerManager.Validate if the designated constraints aren't met. +type ApiListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApiListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ApiListenerManagerValidationError) ErrorName() string { + return "ApiListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ApiListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiListenerManagerValidationError{} + +// Validate checks the field values on Listener_DeprecatedV1 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Listener_DeprecatedV1) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_DeprecatedV1 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Listener_DeprecatedV1MultiError, or nil if none found. +func (m *Listener_DeprecatedV1) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_DeprecatedV1) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBindToPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBindToPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Listener_DeprecatedV1MultiError(errors) + } + + return nil +} + +// Listener_DeprecatedV1MultiError is an error wrapping multiple validation +// errors returned by Listener_DeprecatedV1.ValidateAll() if the designated +// constraints aren't met. +type Listener_DeprecatedV1MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_DeprecatedV1MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_DeprecatedV1MultiError) AllErrors() []error { return m } + +// Listener_DeprecatedV1ValidationError is the validation error returned by +// Listener_DeprecatedV1.Validate if the designated constraints aren't met. +type Listener_DeprecatedV1ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_DeprecatedV1ValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Listener_DeprecatedV1ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_DeprecatedV1ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_DeprecatedV1ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Listener_DeprecatedV1ValidationError) ErrorName() string { + return "Listener_DeprecatedV1ValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_DeprecatedV1ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_DeprecatedV1.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_DeprecatedV1ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_DeprecatedV1ValidationError{} + +// Validate checks the field values on Listener_ConnectionBalanceConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Listener_ConnectionBalanceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_ConnectionBalanceConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Listener_ConnectionBalanceConfigMultiError, or nil if none found. +func (m *Listener_ConnectionBalanceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofBalanceTypePresent := false + switch v := m.BalanceType.(type) { + case *Listener_ConnectionBalanceConfig_ExactBalance_: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true + + if all { + switch v := interface{}(m.GetExactBalance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactBalance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Listener_ConnectionBalanceConfig_ExtendBalance: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true + + if all { + switch v := interface{}(m.GetExtendBalance()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtendBalance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofBalanceTypePresent { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Listener_ConnectionBalanceConfigMultiError(errors) + } + + return nil +} + +// Listener_ConnectionBalanceConfigMultiError is an error wrapping multiple +// validation errors returned by +// Listener_ConnectionBalanceConfig.ValidateAll() if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_ConnectionBalanceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_ConnectionBalanceConfigMultiError) AllErrors() []error { return m } + +// Listener_ConnectionBalanceConfigValidationError is the validation error +// returned by Listener_ConnectionBalanceConfig.Validate if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_ConnectionBalanceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_ConnectionBalanceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_ConnectionBalanceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_ConnectionBalanceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_ConnectionBalanceConfigValidationError) ErrorName() string { + return "Listener_ConnectionBalanceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_ConnectionBalanceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_ConnectionBalanceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_ConnectionBalanceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_ConnectionBalanceConfigValidationError{} + +// Validate checks the field values on Listener_InternalListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Listener_InternalListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_InternalListenerConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Listener_InternalListenerConfigMultiError, or nil if none found. +func (m *Listener_InternalListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_InternalListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Listener_InternalListenerConfigMultiError(errors) + } + + return nil +} + +// Listener_InternalListenerConfigMultiError is an error wrapping multiple +// validation errors returned by Listener_InternalListenerConfig.ValidateAll() +// if the designated constraints aren't met. +type Listener_InternalListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_InternalListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_InternalListenerConfigMultiError) AllErrors() []error { return m } + +// Listener_InternalListenerConfigValidationError is the validation error +// returned by Listener_InternalListenerConfig.Validate if the designated +// constraints aren't met. +type Listener_InternalListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_InternalListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_InternalListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_InternalListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_InternalListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_InternalListenerConfigValidationError) ErrorName() string { + return "Listener_InternalListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_InternalListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_InternalListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_InternalListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_InternalListenerConfigValidationError{} + +// Validate checks the field values on +// Listener_ConnectionBalanceConfig_ExactBalance with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Listener_ConnectionBalanceConfig_ExactBalance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Listener_ConnectionBalanceConfig_ExactBalance with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Listener_ConnectionBalanceConfig_ExactBalanceMultiError, or nil if none found. +func (m *Listener_ConnectionBalanceConfig_ExactBalance) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Listener_ConnectionBalanceConfig_ExactBalanceMultiError(errors) + } + + return nil +} + +// Listener_ConnectionBalanceConfig_ExactBalanceMultiError is an error wrapping +// multiple validation errors returned by +// Listener_ConnectionBalanceConfig_ExactBalance.ValidateAll() if the +// designated constraints aren't met. +type Listener_ConnectionBalanceConfig_ExactBalanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_ConnectionBalanceConfig_ExactBalanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_ConnectionBalanceConfig_ExactBalanceMultiError) AllErrors() []error { return m } + +// Listener_ConnectionBalanceConfig_ExactBalanceValidationError is the +// validation error returned by +// Listener_ConnectionBalanceConfig_ExactBalance.Validate if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfig_ExactBalanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) ErrorName() string { + return "Listener_ConnectionBalanceConfig_ExactBalanceValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_ConnectionBalanceConfig_ExactBalance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_ConnectionBalanceConfig_ExactBalanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_ConnectionBalanceConfig_ExactBalanceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go new file mode 100644 index 000000000..b6dad7e30 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go @@ -0,0 +1,1353 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FilterChainMatch_ConnectionSourceType int32 + +const ( + // Any connection source matches. + FilterChainMatch_ANY FilterChainMatch_ConnectionSourceType = 0 + // Match a connection originating from the same host. + FilterChainMatch_SAME_IP_OR_LOOPBACK FilterChainMatch_ConnectionSourceType = 1 + // Match a connection originating from a different host. + FilterChainMatch_EXTERNAL FilterChainMatch_ConnectionSourceType = 2 +) + +// Enum value maps for FilterChainMatch_ConnectionSourceType. 
+var ( + FilterChainMatch_ConnectionSourceType_name = map[int32]string{ + 0: "ANY", + 1: "SAME_IP_OR_LOOPBACK", + 2: "EXTERNAL", + } + FilterChainMatch_ConnectionSourceType_value = map[string]int32{ + "ANY": 0, + "SAME_IP_OR_LOOPBACK": 1, + "EXTERNAL": 2, + } +) + +func (x FilterChainMatch_ConnectionSourceType) Enum() *FilterChainMatch_ConnectionSourceType { + p := new(FilterChainMatch_ConnectionSourceType) + *p = x + return p +} + +func (x FilterChainMatch_ConnectionSourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FilterChainMatch_ConnectionSourceType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_listener_v3_listener_components_proto_enumTypes[0].Descriptor() +} + +func (FilterChainMatch_ConnectionSourceType) Type() protoreflect.EnumType { + return &file_envoy_config_listener_v3_listener_components_proto_enumTypes[0] +} + +func (x FilterChainMatch_ConnectionSourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FilterChainMatch_ConnectionSourceType.Descriptor instead. +func (FilterChainMatch_ConnectionSourceType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{1, 0} +} + +// [#next-free-field: 6] +type Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *Filter_TypedConfig + // *Filter_ConfigDiscovery + ConfigType isFilter_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *Filter) Reset() { + *x = Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Filter) ProtoMessage() {} + +func (x *Filter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Filter.ProtoReflect.Descriptor instead. +func (*Filter) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{0} +} + +func (x *Filter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Filter) GetConfigType() isFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *Filter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*Filter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *Filter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*Filter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +type isFilter_ConfigType interface { + isFilter_ConfigType() +} + +type Filter_TypedConfig struct { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ // [#extension-category: envoy.filters.network] + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type Filter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*Filter_TypedConfig) isFilter_ConfigType() {} + +func (*Filter_ConfigDiscovery) isFilter_ConfigType() {} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI “www.example.com“ the most specific match would be +// “www.example.com“, then “*.example.com“, then “*.com“, then any filter +// chain without “server_names“ requirements). +// +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 14] +type FilterChainMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + DestinationPort *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
+ PrefixRanges []*v3.CidrRange `protobuf:"bytes,3,rep,name=prefix_ranges,json=prefixRanges,proto3" json:"prefix_ranges,omitempty"` + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + AddressSuffix string `protobuf:"bytes,4,opt,name=address_suffix,json=addressSuffix,proto3" json:"address_suffix,omitempty"` + // [#not-implemented-hide:] + SuffixLen *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=suffix_len,json=suffixLen,proto3" json:"suffix_len,omitempty"` + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. + DirectSourcePrefixRanges []*v3.CidrRange `protobuf:"bytes,13,rep,name=direct_source_prefix_ranges,json=directSourcePrefixRanges,proto3" json:"direct_source_prefix_ranges,omitempty"` + // Specifies the connection source IP match type. Can be any, local or external network. + SourceType FilterChainMatch_ConnectionSourceType `protobuf:"varint,12,opt,name=source_type,json=sourceType,proto3,enum=envoy.config.listener.v3.FilterChainMatch_ConnectionSourceType" json:"source_type,omitempty"` + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + SourcePrefixRanges []*v3.CidrRange `protobuf:"bytes,6,rep,name=source_prefix_ranges,json=sourcePrefixRanges,proto3" json:"source_prefix_ranges,omitempty"` + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + SourcePorts []uint32 `protobuf:"varint,7,rep,packed,name=source_ports,json=sourcePorts,proto3" json:"source_ports,omitempty"` + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. “www.example.com“ + // will be first matched against “www.example.com“, then “*.example.com“, then “*.com“. + // + // Note that partial wildcards are not supported, and values like “*w.example.com“ are invalid. + // The value “*“ is also not supported, and “server_names“ should be omitted instead. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + ServerNames []string `protobuf:"bytes,11,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"` + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // - “raw_buffer“ - default, used when no transport protocol is detected, + // - “tls“ - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. 
+ TransportProtocol string `protobuf:"bytes,9,opt,name=transport_protocol,json=transportProtocol,proto3" json:"transport_protocol,omitempty"` + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // - “http/1.1“ - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // - “h2“ - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + ApplicationProtocols []string `protobuf:"bytes,10,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` +} + +func (x *FilterChainMatch) Reset() { + *x = FilterChainMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChainMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChainMatch) ProtoMessage() {} + +func (x *FilterChainMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChainMatch.ProtoReflect.Descriptor instead. 
+func (*FilterChainMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{1} +} + +func (x *FilterChainMatch) GetDestinationPort() *wrapperspb.UInt32Value { + if x != nil { + return x.DestinationPort + } + return nil +} + +func (x *FilterChainMatch) GetPrefixRanges() []*v3.CidrRange { + if x != nil { + return x.PrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetAddressSuffix() string { + if x != nil { + return x.AddressSuffix + } + return "" +} + +func (x *FilterChainMatch) GetSuffixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.SuffixLen + } + return nil +} + +func (x *FilterChainMatch) GetDirectSourcePrefixRanges() []*v3.CidrRange { + if x != nil { + return x.DirectSourcePrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetSourceType() FilterChainMatch_ConnectionSourceType { + if x != nil { + return x.SourceType + } + return FilterChainMatch_ANY +} + +func (x *FilterChainMatch) GetSourcePrefixRanges() []*v3.CidrRange { + if x != nil { + return x.SourcePrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetSourcePorts() []uint32 { + if x != nil { + return x.SourcePorts + } + return nil +} + +func (x *FilterChainMatch) GetServerNames() []string { + if x != nil { + return x.ServerNames + } + return nil +} + +func (x *FilterChainMatch) GetTransportProtocol() string { + if x != nil { + return x.TransportProtocol + } + return "" +} + +func (x *FilterChainMatch) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 10] +type FilterChain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch *FilterChainMatch `protobuf:"bytes,1,opt,name=filter_chain_match,json=filterChainMatch,proto3" json:"filter_chain_match,omitempty"` + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + // + // For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + // can be created, but due to differences in the connection implementation compared + // to TCP, the onData() method will never be called. Therefore, network filters + // for QUIC listeners should only expect to do work at the start of a new connection + // (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. + Filters []*Filter `protobuf:"bytes,3,rep,name=filters,proto3" json:"filters,omitempty"` + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. 
+ // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. + UseProxyProto *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_proxy_proto,json=useProxyProto,proto3" json:"use_proxy_proto,omitempty"` + // [#not-implemented-hide:] filter chain metadata. + Metadata *v3.Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name “envoy.transport_sockets.tls“ and + // :ref:`DownstreamTlsContext ` in the “typed_config“. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + // [#extension-category: envoy.transport_sockets.downstream] + TransportSocket *v3.TransportSocket `protobuf:"bytes,6,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` + // If present and nonzero, the amount of time to allow incoming connections to complete any + // transport socket negotiations. If this expires before the transport reports connection + // establishment, the connection is summarily closed. + TransportSocketConnectTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=transport_socket_connect_timeout,json=transportSocketConnectTimeout,proto3" json:"transport_socket_connect_timeout,omitempty"` + // The unique name (or empty) by which this filter chain is known. + // Note: :ref:`filter_chain_matcher + // ` + // requires that filter chains are uniquely named within a listener. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. + // If this field is not empty, the filter chain will be built on-demand. + // Otherwise, the filter chain will be built normally and block listener warming. + OnDemandConfiguration *FilterChain_OnDemandConfiguration `protobuf:"bytes,8,opt,name=on_demand_configuration,json=onDemandConfiguration,proto3" json:"on_demand_configuration,omitempty"` +} + +func (x *FilterChain) Reset() { + *x = FilterChain{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChain) ProtoMessage() {} + +func (x *FilterChain) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChain.ProtoReflect.Descriptor instead. +func (*FilterChain) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{2} +} + +func (x *FilterChain) GetFilterChainMatch() *FilterChainMatch { + if x != nil { + return x.FilterChainMatch + } + return nil +} + +func (x *FilterChain) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. 
+func (x *FilterChain) GetUseProxyProto() *wrapperspb.BoolValue { + if x != nil { + return x.UseProxyProto + } + return nil +} + +func (x *FilterChain) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *FilterChain) GetTransportSocket() *v3.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +func (x *FilterChain) GetTransportSocketConnectTimeout() *durationpb.Duration { + if x != nil { + return x.TransportSocketConnectTimeout + } + return nil +} + +func (x *FilterChain) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FilterChain) GetOnDemandConfiguration() *FilterChain_OnDemandConfiguration { + if x != nil { + return x.OnDemandConfiguration + } + return nil +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3307 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +type ListenerFilterChainMatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *ListenerFilterChainMatchPredicate_OrMatch + // *ListenerFilterChainMatchPredicate_AndMatch + // *ListenerFilterChainMatchPredicate_NotMatch + // *ListenerFilterChainMatchPredicate_AnyMatch + // *ListenerFilterChainMatchPredicate_DestinationPortRange + Rule isListenerFilterChainMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *ListenerFilterChainMatchPredicate) Reset() { + *x = ListenerFilterChainMatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilterChainMatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilterChainMatchPredicate) ProtoMessage() {} + +func (x *ListenerFilterChainMatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilterChainMatchPredicate.ProtoReflect.Descriptor instead. 
+func (*ListenerFilterChainMatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{3} +} + +func (m *ListenerFilterChainMatchPredicate) GetRule() isListenerFilterChainMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetOrMatch() *ListenerFilterChainMatchPredicate_MatchSet { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetAndMatch() *ListenerFilterChainMatchPredicate_MatchSet { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetNotMatch() *ListenerFilterChainMatchPredicate { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *ListenerFilterChainMatchPredicate) GetDestinationPortRange() *v31.Int32Range { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_DestinationPortRange); ok { + return x.DestinationPortRange + } + return nil +} + +type isListenerFilterChainMatchPredicate_Rule interface { + isListenerFilterChainMatchPredicate_Rule() +} + +type ListenerFilterChainMatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *ListenerFilterChainMatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *ListenerFilterChainMatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *ListenerFilterChainMatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_DestinationPortRange struct { + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. 
+ DestinationPortRange *v31.Int32Range `protobuf:"bytes,5,opt,name=destination_port_range,json=destinationPortRange,proto3,oneof"` +} + +func (*ListenerFilterChainMatchPredicate_OrMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_AndMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_NotMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_AnyMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_DestinationPortRange) isListenerFilterChainMatchPredicate_Rule() { +} + +// [#next-free-field: 6] +type ListenerFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *ListenerFilter_TypedConfig + // *ListenerFilter_ConfigDiscovery + ConfigType isListenerFilter_ConfigType `protobuf_oneof:"config_type"` + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. + FilterDisabled *ListenerFilterChainMatchPredicate `protobuf:"bytes,4,opt,name=filter_disabled,json=filterDisabled,proto3" json:"filter_disabled,omitempty"` +} + +func (x *ListenerFilter) Reset() { + *x = ListenerFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilter) ProtoMessage() {} + +func (x *ListenerFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilter.ProtoReflect.Descriptor instead. +func (*ListenerFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{4} +} + +func (x *ListenerFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ListenerFilter) GetConfigType() isListenerFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ListenerFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ListenerFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *ListenerFilter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*ListenerFilter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +func (x *ListenerFilter) GetFilterDisabled() *ListenerFilterChainMatchPredicate { + if x != nil { + return x.FilterDisabled + } + return nil +} + +type isListenerFilter_ConfigType interface { + isListenerFilter_ConfigType() +} + +type ListenerFilter_TypedConfig struct { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type ListenerFilter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*ListenerFilter_TypedConfig) isListenerFilter_ConfigType() {} + +func (*ListenerFilter_ConfigDiscovery) isListenerFilter_ConfigType() {} + +// The configuration for on-demand filter chain. If this field is not empty in FilterChain message, +// a filter chain will be built on-demand. +// On-demand filter chains help speedup the warming up of listeners since the building and initialization of +// an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. +// Filter chains that are not often used can be set as on-demand. +type FilterChain_OnDemandConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timeout to wait for filter chain placeholders to complete rebuilding. + // 1. If this field is set to 0, timeout is disabled. + // 2. If not specified, a default timeout of 15s is used. + // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. + // Upon failure or timeout, all connections related to this filter chain will be closed. + // Rebuilding will start again on the next new connection. + RebuildTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=rebuild_timeout,json=rebuildTimeout,proto3" json:"rebuild_timeout,omitempty"` +} + +func (x *FilterChain_OnDemandConfiguration) Reset() { + *x = FilterChain_OnDemandConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChain_OnDemandConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChain_OnDemandConfiguration) ProtoMessage() {} + +func (x *FilterChain_OnDemandConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChain_OnDemandConfiguration.ProtoReflect.Descriptor instead. +func (*FilterChain_OnDemandConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *FilterChain_OnDemandConfiguration) GetRebuildTimeout() *durationpb.Duration { + if x != nil { + return x.RebuildTimeout + } + return nil +} + +// A set of match configurations used for logical operations. +type ListenerFilterChainMatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. 
+ Rules []*ListenerFilterChainMatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) Reset() { + *x = ListenerFilterChainMatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilterChainMatchPredicate_MatchSet) ProtoMessage() {} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilterChainMatchPredicate_MatchSet.ProtoReflect.Descriptor instead. +func (*ListenerFilterChainMatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) GetRules() []*ListenerFilterChainMatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +var File_envoy_config_listener_v3_listener_components_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_listener_components_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 
0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x82, 0x02, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xea, 0x06, 0x0a, 0x10, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x54, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x18, 0xff, 0xff, 0x03, + 0x28, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x44, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x12, 0x3b, 0x0a, 0x0a, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x09, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x12, 0x5e, 0x0a, + 0x1b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x18, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x6a, 0x0a, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0d, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x2a, 0x06, 0x18, 0xff, + 0xff, 0x03, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x46, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 
0x59, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x41, 0x4d, + 0x45, 0x5f, 0x49, 0x50, 0x5f, 0x4f, 0x52, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, + 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x02, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x89, 0x06, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x58, 0x0a, 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x10, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x3a, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0f, 0x75, + 0x73, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0d, 0x75, + 0x73, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3a, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x62, 0x0a, 0x20, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x1d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x54, 0x69, 0x6d, 
0x65, 0x6f, 0x75, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x73, 0x0a, 0x17, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x2e, 0x4f, 0x6e, 0x44, 0x65, 0x6d, + 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x15, 0x6f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x15, 0x4f, 0x6e, 0x44, 0x65, 0x6d, + 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x22, 0xc2, 0x05, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x61, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x63, 0x0a, 0x09, 0x61, 0x6e, + 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x5a, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 
0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, + 0x6e, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x51, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x74, 0x12, 0x5b, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x3a, 0x47, 0x9a, 0xc5, 0x88, 0x1e, 0x42, 0x0a, 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, + 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf2, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 
0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x0f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x17, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_listener_components_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_listener_components_proto_rawDescData = file_envoy_config_listener_v3_listener_components_proto_rawDesc +) + +func file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_listener_components_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_listener_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_listener_components_proto_rawDescData) + }) + return file_envoy_config_listener_v3_listener_components_proto_rawDescData +} + +var file_envoy_config_listener_v3_listener_components_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_listener_v3_listener_components_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_listener_v3_listener_components_proto_goTypes = []interface{}{ + (FilterChainMatch_ConnectionSourceType)(0), // 0: envoy.config.listener.v3.FilterChainMatch.ConnectionSourceType + (*Filter)(nil), // 1: envoy.config.listener.v3.Filter + (*FilterChainMatch)(nil), // 2: envoy.config.listener.v3.FilterChainMatch + (*FilterChain)(nil), // 3: 
envoy.config.listener.v3.FilterChain + (*ListenerFilterChainMatchPredicate)(nil), // 4: envoy.config.listener.v3.ListenerFilterChainMatchPredicate + (*ListenerFilter)(nil), // 5: envoy.config.listener.v3.ListenerFilter + (*FilterChain_OnDemandConfiguration)(nil), // 6: envoy.config.listener.v3.FilterChain.OnDemandConfiguration + (*ListenerFilterChainMatchPredicate_MatchSet)(nil), // 7: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 9: envoy.config.core.v3.ExtensionConfigSource + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*v3.CidrRange)(nil), // 11: envoy.config.core.v3.CidrRange + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*v3.Metadata)(nil), // 13: envoy.config.core.v3.Metadata + (*v3.TransportSocket)(nil), // 14: envoy.config.core.v3.TransportSocket + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + (*v31.Int32Range)(nil), // 16: envoy.type.v3.Int32Range +} +var file_envoy_config_listener_v3_listener_components_proto_depIdxs = []int32{ + 8, // 0: envoy.config.listener.v3.Filter.typed_config:type_name -> google.protobuf.Any + 9, // 1: envoy.config.listener.v3.Filter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 10, // 2: envoy.config.listener.v3.FilterChainMatch.destination_port:type_name -> google.protobuf.UInt32Value + 11, // 3: envoy.config.listener.v3.FilterChainMatch.prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 10, // 4: envoy.config.listener.v3.FilterChainMatch.suffix_len:type_name -> google.protobuf.UInt32Value + 11, // 5: envoy.config.listener.v3.FilterChainMatch.direct_source_prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 0, // 6: envoy.config.listener.v3.FilterChainMatch.source_type:type_name -> envoy.config.listener.v3.FilterChainMatch.ConnectionSourceType + 11, // 7: envoy.config.listener.v3.FilterChainMatch.source_prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 2, // 8: envoy.config.listener.v3.FilterChain.filter_chain_match:type_name -> envoy.config.listener.v3.FilterChainMatch + 1, // 9: envoy.config.listener.v3.FilterChain.filters:type_name -> envoy.config.listener.v3.Filter + 12, // 10: envoy.config.listener.v3.FilterChain.use_proxy_proto:type_name -> google.protobuf.BoolValue + 13, // 11: envoy.config.listener.v3.FilterChain.metadata:type_name -> envoy.config.core.v3.Metadata + 14, // 12: envoy.config.listener.v3.FilterChain.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 15, // 13: envoy.config.listener.v3.FilterChain.transport_socket_connect_timeout:type_name -> google.protobuf.Duration + 6, // 14: envoy.config.listener.v3.FilterChain.on_demand_configuration:type_name -> envoy.config.listener.v3.FilterChain.OnDemandConfiguration + 7, // 15: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.or_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + 7, // 16: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.and_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + 4, // 17: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.not_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 16, // 18: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.destination_port_range:type_name -> envoy.type.v3.Int32Range + 8, // 19: envoy.config.listener.v3.ListenerFilter.typed_config:type_name -> 
google.protobuf.Any + 9, // 20: envoy.config.listener.v3.ListenerFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 4, // 21: envoy.config.listener.v3.ListenerFilter.filter_disabled:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 15, // 22: envoy.config.listener.v3.FilterChain.OnDemandConfiguration.rebuild_timeout:type_name -> google.protobuf.Duration + 4, // 23: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet.rules:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_listener_components_proto_init() } +func file_envoy_config_listener_v3_listener_components_proto_init() { + if File_envoy_config_listener_v3_listener_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_listener_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChainMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilterChainMatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChain_OnDemandConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilterChainMatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Filter_TypedConfig)(nil), + (*Filter_ConfigDiscovery)(nil), + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*ListenerFilterChainMatchPredicate_OrMatch)(nil), + 
(*ListenerFilterChainMatchPredicate_AndMatch)(nil), + (*ListenerFilterChainMatchPredicate_NotMatch)(nil), + (*ListenerFilterChainMatchPredicate_AnyMatch)(nil), + (*ListenerFilterChainMatchPredicate_DestinationPortRange)(nil), + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*ListenerFilter_TypedConfig)(nil), + (*ListenerFilter_ConfigDiscovery)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_listener_components_proto_rawDesc, + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_listener_components_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_listener_components_proto_depIdxs, + EnumInfos: file_envoy_config_listener_v3_listener_components_proto_enumTypes, + MessageInfos: file_envoy_config_listener_v3_listener_components_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_listener_components_proto = out.File + file_envoy_config_listener_v3_listener_components_proto_rawDesc = nil + file_envoy_config_listener_v3_listener_components_proto_goTypes = nil + file_envoy_config_listener_v3_listener_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go new file mode 100644 index 000000000..4a18f8b08 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go @@ -0,0 +1,1644 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Filter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in FilterMultiError, or nil if none found. 
+func (m *Filter) ValidateAll() error { + return m.validate(true) +} + +func (m *Filter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := FilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *Filter_TypedConfig: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Filter_ConfigDiscovery: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return FilterMultiError(errors) + } + + return nil +} + +// FilterMultiError is an error wrapping multiple validation errors returned by +// Filter.ValidateAll() if the designated constraints aren't met. +type FilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterMultiError) AllErrors() []error { return m } + +// FilterValidationError is the validation error returned by Filter.Validate if +// the designated constraints aren't met. +type FilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e FilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterValidationError) ErrorName() string { return "FilterValidationError" } + +// Error satisfies the builtin error interface +func (e FilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterValidationError{} + +// Validate checks the field values on FilterChainMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *FilterChainMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChainMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterChainMatchMultiError, or nil if none found. +func (m *FilterChainMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChainMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetDestinationPort(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 65535 { + err := FilterChainMatchValidationError{ + field: "DestinationPort", + reason: "value must be inside range [1, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetPrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AddressSuffix + + if all { + switch v := interface{}(m.GetSuffixLen()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetSuffixLen()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetDirectSourcePrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := FilterChainMatch_ConnectionSourceType_name[int32(m.GetSourceType())]; !ok { + err := FilterChainMatchValidationError{ + field: "SourceType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetSourcePrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetSourcePorts() { + _, _ = idx, item + + if val := item; val < 1 || val > 65535 { + err := FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePorts[%v]", idx), + reason: "value must be inside range [1, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for TransportProtocol + + if len(errors) > 0 { + return FilterChainMatchMultiError(errors) + } + + return nil +} + +// FilterChainMatchMultiError is an error wrapping multiple validation errors +// returned by FilterChainMatch.ValidateAll() if the designated constraints +// aren't met. +type FilterChainMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChainMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m FilterChainMatchMultiError) AllErrors() []error { return m } + +// FilterChainMatchValidationError is the validation error returned by +// FilterChainMatch.Validate if the designated constraints aren't met. +type FilterChainMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChainMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterChainMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChainMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChainMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterChainMatchValidationError) ErrorName() string { return "FilterChainMatchValidationError" } + +// Error satisfies the builtin error interface +func (e FilterChainMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChainMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChainMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChainMatchValidationError{} + +// Validate checks the field values on FilterChain with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterChain) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChain with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterChainMultiError, or +// nil if none found. 
+func (m *FilterChain) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChain) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFilterChainMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterChainMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUseProxyProto()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseProxyProto()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + 
case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocketConnectTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocketConnectTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetOnDemandConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnDemandConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterChainMultiError(errors) + } + + return nil +} + +// FilterChainMultiError is an error wrapping multiple validation errors +// returned by FilterChain.ValidateAll() if the designated constraints aren't met. +type FilterChainMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChainMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterChainMultiError) AllErrors() []error { return m } + +// FilterChainValidationError is the validation error returned by +// FilterChain.Validate if the designated constraints aren't met. +type FilterChainValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChainValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e FilterChainValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChainValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChainValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterChainValidationError) ErrorName() string { return "FilterChainValidationError" } + +// Error satisfies the builtin error interface +func (e FilterChainValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChain.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChainValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChainValidationError{} + +// Validate checks the field values on ListenerFilterChainMatchPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenerFilterChainMatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerFilterChainMatchPredicate +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenerFilterChainMatchPredicateMultiError, or nil if none found. +func (m *ListenerFilterChainMatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *ListenerFilterChainMatchPredicate_OrMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_AndMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + 
reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_NotMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_AnyMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *ListenerFilterChainMatchPredicate_DestinationPortRange: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationPortRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationPortRange()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := 
ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ListenerFilterChainMatchPredicateMultiError(errors) + } + + return nil +} + +// ListenerFilterChainMatchPredicateMultiError is an error wrapping multiple +// validation errors returned by +// ListenerFilterChainMatchPredicate.ValidateAll() if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerFilterChainMatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterChainMatchPredicateMultiError) AllErrors() []error { return m } + +// ListenerFilterChainMatchPredicateValidationError is the validation error +// returned by ListenerFilterChainMatchPredicate.Validate if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterChainMatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterChainMatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterChainMatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterChainMatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterChainMatchPredicateValidationError) ErrorName() string { + return "ListenerFilterChainMatchPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerFilterChainMatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilterChainMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterChainMatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterChainMatchPredicateValidationError{} + +// Validate checks the field values on ListenerFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenerFilterMultiError, +// or nil if none found. 
+func (m *ListenerFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ListenerFilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilterDisabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterDisabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ConfigType.(type) { + case *ListenerFilter_TypedConfig: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilter_ConfigDiscovery: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ListenerFilterMultiError(errors) + } + + return nil +} + +// ListenerFilterMultiError is an error wrapping 
multiple validation errors +// returned by ListenerFilter.ValidateAll() if the designated constraints +// aren't met. +type ListenerFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterMultiError) AllErrors() []error { return m } + +// ListenerFilterValidationError is the validation error returned by +// ListenerFilter.Validate if the designated constraints aren't met. +type ListenerFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterValidationError) ErrorName() string { return "ListenerFilterValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterValidationError{} + +// Validate checks the field values on FilterChain_OnDemandConfiguration with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *FilterChain_OnDemandConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChain_OnDemandConfiguration +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// FilterChain_OnDemandConfigurationMultiError, or nil if none found. 
+func (m *FilterChain_OnDemandConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChain_OnDemandConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRebuildTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRebuildTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterChain_OnDemandConfigurationMultiError(errors) + } + + return nil +} + +// FilterChain_OnDemandConfigurationMultiError is an error wrapping multiple +// validation errors returned by +// FilterChain_OnDemandConfiguration.ValidateAll() if the designated +// constraints aren't met. +type FilterChain_OnDemandConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChain_OnDemandConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterChain_OnDemandConfigurationMultiError) AllErrors() []error { return m } + +// FilterChain_OnDemandConfigurationValidationError is the validation error +// returned by FilterChain_OnDemandConfiguration.Validate if the designated +// constraints aren't met. +type FilterChain_OnDemandConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChain_OnDemandConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterChain_OnDemandConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChain_OnDemandConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChain_OnDemandConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilterChain_OnDemandConfigurationValidationError) ErrorName() string { + return "FilterChain_OnDemandConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e FilterChain_OnDemandConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChain_OnDemandConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChain_OnDemandConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChain_OnDemandConfigurationValidationError{} + +// Validate checks the field values on +// ListenerFilterChainMatchPredicate_MatchSet with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerFilterChainMatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ListenerFilterChainMatchPredicate_MatchSet with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ListenerFilterChainMatchPredicate_MatchSetMultiError, or nil if none found. +func (m *ListenerFilterChainMatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerFilterChainMatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// ListenerFilterChainMatchPredicate_MatchSetMultiError is an error wrapping +// multiple validation errors returned by +// ListenerFilterChainMatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ListenerFilterChainMatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterChainMatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// ListenerFilterChainMatchPredicate_MatchSetValidationError is the validation +// error returned by ListenerFilterChainMatchPredicate_MatchSet.Validate if +// the designated constraints aren't met. +type ListenerFilterChainMatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) ErrorName() string { + return "ListenerFilterChainMatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilterChainMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterChainMatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterChainMatchPredicate_MatchSetValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go new file mode 100644 index 000000000..7896458b8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go @@ -0,0 +1,1213 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Filter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*Filter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Filter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FilterChainMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChainMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChainMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DirectSourcePrefixRanges) > 0 { + 
for iNdEx := len(m.DirectSourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DirectSourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectSourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.SourceType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SourceType)) + i-- + dAtA[i] = 0x60 + } + if len(m.ServerNames) > 0 { + for iNdEx := len(m.ServerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServerNames[iNdEx]) + copy(dAtA[i:], m.ServerNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerNames[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ApplicationProtocols) > 0 { + for iNdEx := len(m.ApplicationProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ApplicationProtocols[iNdEx]) + copy(dAtA[i:], m.ApplicationProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ApplicationProtocols[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.TransportProtocol) > 0 { + i -= len(m.TransportProtocol) + copy(dAtA[i:], m.TransportProtocol) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TransportProtocol))) + i-- + dAtA[i] = 0x4a + } + if m.DestinationPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.DestinationPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.SourcePorts) > 0 { + var pksize2 int + for _, num := range m.SourcePorts { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.SourcePorts { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if len(m.SourcePrefixRanges) > 0 { + for iNdEx := len(m.SourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SuffixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuffixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.AddressSuffix) > 0 { + i -= len(m.AddressSuffix) + copy(dAtA[i:], m.AddressSuffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressSuffix))) + i-- + dAtA[i] = 0x22 + } + if len(m.PrefixRanges) > 0 { + for iNdEx := len(m.PrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.PrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, 
err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + return len(dAtA) - i, nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RebuildTimeout != nil { + size, err := (*durationpb.Duration)(m.RebuildTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterChain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocketConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.TransportSocketConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OnDemandConfiguration != nil { + size, err := m.OnDemandConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x3a + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); 
ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.UseProxyProto != nil { + size, err := (*wrapperspb.BoolValue)(m.UseProxyProto).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.FilterChainMatch != nil { + size, err := m.FilterChainMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_NotMatch); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ListenerFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.FilterDisabled != nil { + size, err := m.FilterDisabled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigType.(*ListenerFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Filter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + 
if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Filter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Filter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FilterChainMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PrefixRanges) > 0 { + for _, e := range m.PrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.AddressSuffix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuffixLen != nil { + l = (*wrapperspb.UInt32Value)(m.SuffixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SourcePrefixRanges) > 0 { + for _, e := range m.SourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SourcePorts) > 0 { + l = 0 + for _, e := range m.SourcePorts { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.DestinationPort != nil { + l = (*wrapperspb.UInt32Value)(m.DestinationPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TransportProtocol) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ApplicationProtocols) > 0 { + for _, s := range m.ApplicationProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ServerNames) > 0 { + for _, s := range m.ServerNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SourceType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SourceType)) + } + if len(m.DirectSourcePrefixRanges) > 0 { + for _, e := range m.DirectSourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain_OnDemandConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RebuildTimeout != nil { + l = (*durationpb.Duration)(m.RebuildTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterChainMatch != nil { + l = m.FilterChainMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseProxyProto != nil { + l = (*wrapperspb.BoolValue)(m.UseProxyProto).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } 
+ if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemandConfiguration != nil { + l = m.OnDemandConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketConnectTimeout != nil { + l = (*durationpb.Duration)(m.TransportSocketConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.FilterDisabled != nil { + l = m.FilterDisabled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ListenerFilter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go new file mode 100644 index 000000000..cdccfea8e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go @@ -0,0 +1,1297 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SocketOptions != nil { + if vtmsg, ok := interface{}(m.SocketOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerCollection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Entries[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Listener_DeprecatedV1) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_DeprecatedV1) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_DeprecatedV1) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExtendBalance); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExactBalance_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactBalance != nil { + size, err := m.ExactBalance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendBalance != nil { + if vtmsg, ok := interface{}(m.ExtendBalance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExtendBalance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Listener_InternalListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_InternalListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil 
+ } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BypassOverloadManager { + i-- + if m.BypassOverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x98 + } + if m.MaxConnectionsToAcceptPerSocketEvent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + } + if m.FilterChainMatcher != nil { + if vtmsg, ok := interface{}(m.FilterChainMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterChainMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.EnableMptcp { + i-- + if m.EnableMptcp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.EnableReusePort != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableReusePort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if msg, ok := m.ListenerSpecifier.(*Listener_InternalListener); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.DefaultFilterChain != nil { + size, err := m.DefaultFilterChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TcpBacklogSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpBacklogSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) 
(int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.ReusePort { + i-- + if m.ReusePort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.ConnectionBalanceConfig != nil { + size, err := m.ConnectionBalanceConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.ApiListener != nil { + size, err := m.ApiListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.UdpListenerConfig != nil { + size, err := m.UdpListenerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.ContinueOnListenerFiltersTimeout { + i-- + if m.ContinueOnListenerFiltersTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.TrafficDirection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrafficDirection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.ListenerFiltersTimeout != nil { + size, err := (*durationpb.Duration)(m.ListenerFiltersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.TcpFastOpenQueueLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.Transparent != nil { + size, err := (*wrapperspb.BoolValue)(m.Transparent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if len(m.ListenerFilters) > 0 { + for iNdEx := len(m.ListenerFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.ListenerFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.DrainType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainType)) + i-- + dAtA[i] = 0x40 + } + if m.DeprecatedV1 != nil { + size, err := m.DeprecatedV1.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseOriginalDst != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOriginalDst).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.FilterChains) > 0 { + for iNdEx := len(m.FilterChains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FilterChains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_InternalListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InternalListener != nil { + size, err := m.InternalListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + return len(dAtA) - i, nil +} +func (m *ListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValidationListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidationListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValidationListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ApiListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + if size, ok := interface{}(m.SocketOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SocketOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_DeprecatedV1) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BindToPort != nil { + l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int 
+ _ = l + if vtmsg, ok := m.BalanceType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExactBalance != nil { + l = m.ExactBalance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendBalance != nil { + if size, ok := interface{}(m.ExtendBalance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExtendBalance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_InternalListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterChains) > 0 { + for _, e := range m.FilterChains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseOriginalDst != nil { + l = (*wrapperspb.BoolValue)(m.UseOriginalDst).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeprecatedV1 != nil { + l = m.DeprecatedV1.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DrainType)) + } + if len(m.ListenerFilters) > 0 { + for _, e := range m.ListenerFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Transparent != nil { + l = (*wrapperspb.BoolValue)(m.Transparent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TcpFastOpenQueueLength != nil { + l = (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ListenerFiltersTimeout != nil { + l = (*durationpb.Duration)(m.ListenerFiltersTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrafficDirection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.TrafficDirection)) + } + if m.ContinueOnListenerFiltersTimeout { + n += 3 + } + if m.UdpListenerConfig != nil { + l = m.UdpListenerConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.ApiListener != nil { + l = m.ApiListener.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionBalanceConfig != nil { + l = m.ConnectionBalanceConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReusePort { + n += 3 + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TcpBacklogSize != nil { + l = (*wrapperspb.UInt32Value)(m.TcpBacklogSize).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultFilterChain != nil { + l = m.DefaultFilterChain.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BindToPort != nil { + l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ListenerSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.StatPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableReusePort != nil { + l = (*wrapperspb.BoolValue)(m.EnableReusePort).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableMptcp { + n += 3 + } + if m.IgnoreGlobalConnLimit { + n += 3 + } + if m.FilterChainMatcher != nil { + if size, ok := interface{}(m.FilterChainMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterChainMatcher) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalAddresses) > 0 { + for _, e := range m.AdditionalAddresses { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxConnectionsToAcceptPerSocketEvent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BypassOverloadManager { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_InternalListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InternalListener != nil { + l = m.InternalListener.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *ListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ValidationListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApiListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go new file mode 100644 index 000000000..a2f3a48e2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration specific to the UDP QUIC listener. +// [#next-free-field: 12] +type QuicProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuicProtocolOptions *v3.QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"` + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. + // + // If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified. + IdleTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // Connection timeout in milliseconds before the crypto handshake is finished. + // + // If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. + CryptoHandshakeTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=crypto_handshake_timeout,json=cryptoHandshakeTimeout,proto3" json:"crypto_handshake_timeout,omitempty"` + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + Enabled *v3.RuntimeFeatureFlag `protobuf:"bytes,4,opt,name=enabled,proto3" json:"enabled,omitempty"` + // A multiplier to number of connections which is used to determine how many packets to read per + // event loop. A reasonable number should allow the listener to process enough payload but not + // starve TCP and other UDP sockets and also prevent long event loop duration. + // The default value is 32. This means if there are N QUIC connections, the total number of + // packets to read in each read event will be 32 * N. + // The actual number of packets to read in total by the UDP listener is also + // bound by 6000, regardless of this field or how many connections there are. + PacketsToReadToConnectionCountRatio *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=packets_to_read_to_connection_count_ratio,json=packetsToReadToConnectionCountRatio,proto3" json:"packets_to_read_to_connection_count_ratio,omitempty"` + // Configure which implementation of “quic::QuicCryptoClientStreamBase“ to be used for this listener. + // If not specified the :ref:`QUICHE default one configured by ` will be used. + // [#extension-category: envoy.quic.server.crypto_stream] + CryptoStreamConfig *v3.TypedExtensionConfig `protobuf:"bytes,6,opt,name=crypto_stream_config,json=cryptoStreamConfig,proto3" json:"crypto_stream_config,omitempty"` + // Configure which implementation of “quic::ProofSource“ to be used for this listener. 
+ // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.proof_source] + ProofSourceConfig *v3.TypedExtensionConfig `protobuf:"bytes,7,opt,name=proof_source_config,json=proofSourceConfig,proto3" json:"proof_source_config,omitempty"` + // Config which implementation of “quic::ConnectionIdGeneratorInterface“ to be used for this listener. + // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.connection_id_generator] + ConnectionIdGeneratorConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=connection_id_generator_config,json=connectionIdGeneratorConfig,proto3" json:"connection_id_generator_config,omitempty"` + // Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. + // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with. + // If not specified, Envoy will not advertise any server's preferred address. + // [#extension-category: envoy.quic.server_preferred_address] + ServerPreferredAddressConfig *v3.TypedExtensionConfig `protobuf:"bytes,9,opt,name=server_preferred_address_config,json=serverPreferredAddressConfig,proto3" json:"server_preferred_address_config,omitempty"` + // Configure the server to send transport parameter `disable_active_migration `_. + // Defaults to false (do not send this transport parameter). + SendDisableActiveMigration *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=send_disable_active_migration,json=sendDisableActiveMigration,proto3" json:"send_disable_active_migration,omitempty"` + // Configure which implementation of “quic::QuicConnectionDebugVisitor“ to be used for this listener. + // If not specified, no debug visitor will be attached to connections. + // [#extension-category: envoy.quic.connection_debug_visitor] + ConnectionDebugVisitorConfig *v3.TypedExtensionConfig `protobuf:"bytes,11,opt,name=connection_debug_visitor_config,json=connectionDebugVisitorConfig,proto3" json:"connection_debug_visitor_config,omitempty"` +} + +func (x *QuicProtocolOptions) Reset() { + *x = QuicProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_quic_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicProtocolOptions) ProtoMessage() {} + +func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_quic_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead. 
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_quic_config_proto_rawDescGZIP(), []int{0} +} + +func (x *QuicProtocolOptions) GetQuicProtocolOptions() *v3.QuicProtocolOptions { + if x != nil { + return x.QuicProtocolOptions + } + return nil +} + +func (x *QuicProtocolOptions) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *QuicProtocolOptions) GetCryptoHandshakeTimeout() *durationpb.Duration { + if x != nil { + return x.CryptoHandshakeTimeout + } + return nil +} + +func (x *QuicProtocolOptions) GetEnabled() *v3.RuntimeFeatureFlag { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *QuicProtocolOptions) GetPacketsToReadToConnectionCountRatio() *wrapperspb.UInt32Value { + if x != nil { + return x.PacketsToReadToConnectionCountRatio + } + return nil +} + +func (x *QuicProtocolOptions) GetCryptoStreamConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.CryptoStreamConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetProofSourceConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ProofSourceConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionIdGeneratorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ConnectionIdGeneratorConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetServerPreferredAddressConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ServerPreferredAddressConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetSendDisableActiveMigration() *wrapperspb.BoolValue { + if x != nil { + return x.SendDisableActiveMigration + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionDebugVisitorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ConnectionDebugVisitorConfig + } + return nil +} + +var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x69, 0x63, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 
0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, + 0x08, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x53, 0x0a, 0x18, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, + 0x6c, 0x61, 0x67, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x7d, 0x0a, 0x29, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x23, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, + 0x6f, 0x52, 0x65, 0x61, 0x64, 0x54, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x5c, 0x0a, 0x14, 
0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, 0x13, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x1d, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x73, 0x65, 0x6e, 0x64, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 
0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x56, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_listener_v3_quic_config_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_quic_config_proto_rawDescData = file_envoy_config_listener_v3_quic_config_proto_rawDesc +) + +func file_envoy_config_listener_v3_quic_config_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_quic_config_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_quic_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_quic_config_proto_rawDescData) + }) + return file_envoy_config_listener_v3_quic_config_proto_rawDescData +} + +var file_envoy_config_listener_v3_quic_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_listener_v3_quic_config_proto_goTypes = []interface{}{ + (*QuicProtocolOptions)(nil), // 0: envoy.config.listener.v3.QuicProtocolOptions + (*v3.QuicProtocolOptions)(nil), // 1: envoy.config.core.v3.QuicProtocolOptions + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*v3.RuntimeFeatureFlag)(nil), // 3: envoy.config.core.v3.RuntimeFeatureFlag + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*v3.TypedExtensionConfig)(nil), // 5: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue +} +var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 2, // 1: envoy.config.listener.v3.QuicProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 2, // 2: envoy.config.listener.v3.QuicProtocolOptions.crypto_handshake_timeout:type_name -> google.protobuf.Duration + 3, // 3: envoy.config.listener.v3.QuicProtocolOptions.enabled:type_name -> envoy.config.core.v3.RuntimeFeatureFlag + 4, // 4: envoy.config.listener.v3.QuicProtocolOptions.packets_to_read_to_connection_count_ratio:type_name -> google.protobuf.UInt32Value + 5, // 5: 
envoy.config.listener.v3.QuicProtocolOptions.crypto_stream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 6: envoy.config.listener.v3.QuicProtocolOptions.proof_source_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 7: envoy.config.listener.v3.QuicProtocolOptions.connection_id_generator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 6, // 9: envoy.config.listener.v3.QuicProtocolOptions.send_disable_active_migration:type_name -> google.protobuf.BoolValue + 5, // 10: envoy.config.listener.v3.QuicProtocolOptions.connection_debug_visitor_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_quic_config_proto_init() } +func file_envoy_config_listener_v3_quic_config_proto_init() { + if File_envoy_config_listener_v3_quic_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_quic_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_quic_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_quic_config_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_quic_config_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_quic_config_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_quic_config_proto = out.File + file_envoy_config_listener_v3_quic_config_proto_rawDesc = nil + file_envoy_config_listener_v3_quic_config_proto_goTypes = nil + file_envoy_config_listener_v3_quic_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go new file mode 100644 index 000000000..efabacb66 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go @@ -0,0 +1,444 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicProtocolOptionsMultiError, or nil if none found. +func (m *QuicProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetQuicProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCryptoHandshakeTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCryptoHandshakeTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if 
err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetPacketsToReadToConnectionCountRatio(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := QuicProtocolOptionsValidationError{ + field: "PacketsToReadToConnectionCountRatio", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetCryptoStreamConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCryptoStreamConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetProofSourceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProofSourceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionIdGeneratorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionIdGeneratorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetServerPreferredAddressConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != 
nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServerPreferredAddressConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSendDisableActiveMigration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSendDisableActiveMigration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionDebugVisitorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionDebugVisitorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return QuicProtocolOptionsMultiError(errors) + } + + return nil +} + +// QuicProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by QuicProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type QuicProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QuicProtocolOptionsMultiError) AllErrors() []error { return m } + +// QuicProtocolOptionsValidationError is the validation error returned by +// QuicProtocolOptions.Validate if the designated constraints aren't met. +type QuicProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e QuicProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicProtocolOptionsValidationError) ErrorName() string { + return "QuicProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicProtocolOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go new file mode 100644 index 000000000..4b0826804 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go @@ -0,0 +1,345 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionDebugVisitorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionDebugVisitorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.SendDisableActiveMigration != nil { + size, err := (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.ServerPreferredAddressConfig != nil { + if vtmsg, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServerPreferredAddressConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ConnectionIdGeneratorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionIdGeneratorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ProofSourceConfig != nil { + if vtmsg, ok := interface{}(m.ProofSourceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProofSourceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CryptoStreamConfig != nil { + if vtmsg, ok := interface{}(m.CryptoStreamConfig).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CryptoStreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PacketsToReadToConnectionCountRatio != nil { + size, err := (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Enabled != nil { + if vtmsg, ok := interface{}(m.Enabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Enabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.CryptoHandshakeTimeout != nil { + size, err := (*durationpb.Duration)(m.CryptoHandshakeTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + if vtmsg, ok := interface{}(m.QuicProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.QuicProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + if size, ok := interface{}(m.QuicProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.QuicProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoHandshakeTimeout != nil { + l = (*durationpb.Duration)(m.CryptoHandshakeTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + if size, ok := interface{}(m.Enabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Enabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PacketsToReadToConnectionCountRatio != nil { + l = (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoStreamConfig != nil { + if size, 
ok := interface{}(m.CryptoStreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CryptoStreamConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProofSourceConfig != nil { + if size, ok := interface{}(m.ProofSourceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProofSourceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdGeneratorConfig != nil { + if size, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionIdGeneratorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ServerPreferredAddressConfig != nil { + if size, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServerPreferredAddressConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendDisableActiveMigration != nil { + l = (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionDebugVisitorConfig != nil { + if size, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionDebugVisitorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go new file mode 100644 index 000000000..a5fb16222 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go @@ -0,0 +1,283 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 9] +type UdpListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // UDP socket configuration for the listener. The default for + // :ref:`prefer_gro ` is false for + // listener sockets. If receiving a large amount of datagrams from a small number of sources, it + // may be worthwhile to enable this option after performance testing. + DownstreamSocketConfig *v3.UdpSocketConfig `protobuf:"bytes,5,opt,name=downstream_socket_config,json=downstreamSocketConfig,proto3" json:"downstream_socket_config,omitempty"` + // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set + // to the default object to enable QUIC without modifying any additional options. 
+ QuicOptions *QuicProtocolOptions `protobuf:"bytes,7,opt,name=quic_options,json=quicOptions,proto3" json:"quic_options,omitempty"` + // Configuration for the UDP packet writer. If empty, HTTP/3 will use GSO if available + // (:ref:`UdpDefaultWriterFactory `) + // or the default kernel sendmsg if not, + // (:ref:`UdpDefaultWriterFactory `) + // and raw UDP will use kernel sendmsg. + // [#extension-category: envoy.udp_packet_writer] + UdpPacketPacketWriterConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=udp_packet_packet_writer_config,json=udpPacketPacketWriterConfig,proto3" json:"udp_packet_packet_writer_config,omitempty"` +} + +func (x *UdpListenerConfig) Reset() { + *x = UdpListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UdpListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UdpListenerConfig) ProtoMessage() {} + +func (x *UdpListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UdpListenerConfig.ProtoReflect.Descriptor instead. +func (*UdpListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP(), []int{0} +} + +func (x *UdpListenerConfig) GetDownstreamSocketConfig() *v3.UdpSocketConfig { + if x != nil { + return x.DownstreamSocketConfig + } + return nil +} + +func (x *UdpListenerConfig) GetQuicOptions() *QuicProtocolOptions { + if x != nil { + return x.QuicOptions + } + return nil +} + +func (x *UdpListenerConfig) GetUdpPacketPacketWriterConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.UdpPacketPacketWriterConfig + } + return nil +} + +type ActiveRawUdpListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ActiveRawUdpListenerConfig) Reset() { + *x = ActiveRawUdpListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActiveRawUdpListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActiveRawUdpListenerConfig) ProtoMessage() {} + +func (x *ActiveRawUdpListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActiveRawUdpListenerConfig.ProtoReflect.Descriptor instead. 
+func (*ActiveRawUdpListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP(), []int{1} +} + +var File_envoy_config_listener_v3_udp_listener_config_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x69, + 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x8e, 0x03, 0x0a, 0x11, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x18, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x16, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x0c, 0x71, 0x75, 0x69, 0x63, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x71, 0x75, + 0x69, 0x63, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x70, 0x0a, 0x1f, 0x75, 0x64, 0x70, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, + 0x75, 0x64, 0x70, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, + 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x55, 0x0a, 0x1a, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x61, 0x77, 0x55, 0x64, + 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, + 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x61, 0x77, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x96, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x55, 0x64, 0x70, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData = file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc +) + +func file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData) + }) + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData +} + +var file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_listener_v3_udp_listener_config_proto_goTypes = []interface{}{ + (*UdpListenerConfig)(nil), // 0: envoy.config.listener.v3.UdpListenerConfig + (*ActiveRawUdpListenerConfig)(nil), // 1: envoy.config.listener.v3.ActiveRawUdpListenerConfig + (*v3.UdpSocketConfig)(nil), // 2: envoy.config.core.v3.UdpSocketConfig + (*QuicProtocolOptions)(nil), // 3: 
envoy.config.listener.v3.QuicProtocolOptions + (*v3.TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs = []int32{ + 2, // 0: envoy.config.listener.v3.UdpListenerConfig.downstream_socket_config:type_name -> envoy.config.core.v3.UdpSocketConfig + 3, // 1: envoy.config.listener.v3.UdpListenerConfig.quic_options:type_name -> envoy.config.listener.v3.QuicProtocolOptions + 4, // 2: envoy.config.listener.v3.UdpListenerConfig.udp_packet_packet_writer_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_udp_listener_config_proto_init() } +func file_envoy_config_listener_v3_udp_listener_config_proto_init() { + if File_envoy_config_listener_v3_udp_listener_config_proto != nil { + return + } + file_envoy_config_listener_v3_quic_config_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UdpListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ActiveRawUdpListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_udp_listener_config_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_udp_listener_config_proto = out.File + file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc = nil + file_envoy_config_listener_v3_udp_listener_config_proto_goTypes = nil + file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go new file mode 100644 index 000000000..e52578e98 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go @@ -0,0 +1,328 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UdpListenerConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *UdpListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UdpListenerConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UdpListenerConfigMultiError, or nil if none found. +func (m *UdpListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UdpListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDownstreamSocketConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamSocketConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetQuicOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUdpPacketPacketWriterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUdpPacketPacketWriterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UdpListenerConfigMultiError(errors) + } + + return nil +} + +// UdpListenerConfigMultiError is an error wrapping multiple validation errors +// returned by UdpListenerConfig.ValidateAll() if the designated constraints +// aren't met. +type UdpListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UdpListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UdpListenerConfigMultiError) AllErrors() []error { return m } + +// UdpListenerConfigValidationError is the validation error returned by +// UdpListenerConfig.Validate if the designated constraints aren't met. +type UdpListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UdpListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UdpListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UdpListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UdpListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UdpListenerConfigValidationError) ErrorName() string { + return "UdpListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e UdpListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUdpListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UdpListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UdpListenerConfigValidationError{} + +// Validate checks the field values on ActiveRawUdpListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ActiveRawUdpListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ActiveRawUdpListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ActiveRawUdpListenerConfigMultiError, or nil if none found. 
+func (m *ActiveRawUdpListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ActiveRawUdpListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ActiveRawUdpListenerConfigMultiError(errors) + } + + return nil +} + +// ActiveRawUdpListenerConfigMultiError is an error wrapping multiple +// validation errors returned by ActiveRawUdpListenerConfig.ValidateAll() if +// the designated constraints aren't met. +type ActiveRawUdpListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActiveRawUdpListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActiveRawUdpListenerConfigMultiError) AllErrors() []error { return m } + +// ActiveRawUdpListenerConfigValidationError is the validation error returned +// by ActiveRawUdpListenerConfig.Validate if the designated constraints aren't met. +type ActiveRawUdpListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActiveRawUdpListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActiveRawUdpListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActiveRawUdpListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActiveRawUdpListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ActiveRawUdpListenerConfigValidationError) ErrorName() string { + return "ActiveRawUdpListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ActiveRawUdpListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sActiveRawUdpListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActiveRawUdpListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActiveRawUdpListenerConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go new file mode 100644 index 000000000..326259335 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go @@ -0,0 +1,184 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UdpPacketPacketWriterConfig != nil { + if vtmsg, ok := interface{}(m.UdpPacketPacketWriterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UdpPacketPacketWriterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.QuicOptions != nil { + size, err := m.QuicOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.DownstreamSocketConfig != nil { + if vtmsg, ok := interface{}(m.DownstreamSocketConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamSocketConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ActiveRawUdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *UdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DownstreamSocketConfig != nil { + if size, ok := interface{}(m.DownstreamSocketConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamSocketConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.QuicOptions != nil { + l = m.QuicOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UdpPacketPacketWriterConfig != nil { + if size, ok := 
interface{}(m.UdpPacketPacketWriterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UdpPacketPacketWriterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActiveRawUdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go new file mode 100644 index 000000000..7ff4dff16 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HistogramEmitMode is used to configure which metric types should be emitted for histograms. +type HistogramEmitMode int32 + +const ( + // Emit Histogram and Summary metric types. + HistogramEmitMode_SUMMARY_AND_HISTOGRAM HistogramEmitMode = 0 + // Emit only Summary metric types. + HistogramEmitMode_SUMMARY HistogramEmitMode = 1 + // Emit only Histogram metric types. + HistogramEmitMode_HISTOGRAM HistogramEmitMode = 2 +) + +// Enum value maps for HistogramEmitMode. +var ( + HistogramEmitMode_name = map[int32]string{ + 0: "SUMMARY_AND_HISTOGRAM", + 1: "SUMMARY", + 2: "HISTOGRAM", + } + HistogramEmitMode_value = map[string]int32{ + "SUMMARY_AND_HISTOGRAM": 0, + "SUMMARY": 1, + "HISTOGRAM": 2, + } +) + +func (x HistogramEmitMode) Enum() *HistogramEmitMode { + p := new(HistogramEmitMode) + *p = x + return p +} + +func (x HistogramEmitMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HistogramEmitMode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0].Descriptor() +} + +func (HistogramEmitMode) Type() protoreflect.EnumType { + return &file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0] +} + +func (x HistogramEmitMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HistogramEmitMode.Descriptor instead. +func (HistogramEmitMode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP(), []int{0} +} + +// Metrics Service is configured as a built-in “envoy.stat_sinks.metrics_service“ :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +// +// Example: +// +// .. 
code-block:: yaml +// +// stats_sinks: +// - name: envoy.stat_sinks.metrics_service +// typed_config: +// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig +// +// [#extension: envoy.stat_sinks.metrics_service] +// [#next-free-field: 6] +type MetricsServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that hosts the metrics service. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + TransportApiVersion v3.ApiVersion `protobuf:"varint,3,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + ReportCountersAsDeltas *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=report_counters_as_deltas,json=reportCountersAsDeltas,proto3" json:"report_counters_as_deltas,omitempty"` + // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, + // and the tag extracted name will be used instead of the full name, which may contain values used by the tag + // extractor or additional tags added during stats creation. + EmitTagsAsLabels bool `protobuf:"varint,4,opt,name=emit_tags_as_labels,json=emitTagsAsLabels,proto3" json:"emit_tags_as_labels,omitempty"` + // Specify which metrics types to emit for histograms. Defaults to SUMMARY_AND_HISTOGRAM. + HistogramEmitMode HistogramEmitMode `protobuf:"varint,5,opt,name=histogram_emit_mode,json=histogramEmitMode,proto3,enum=envoy.config.metrics.v3.HistogramEmitMode" json:"histogram_emit_mode,omitempty"` +} + +func (x *MetricsServiceConfig) Reset() { + *x = MetricsServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricsServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsServiceConfig) ProtoMessage() {} + +func (x *MetricsServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsServiceConfig.ProtoReflect.Descriptor instead. 
+func (*MetricsServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricsServiceConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *MetricsServiceConfig) GetTransportApiVersion() v3.ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return v3.ApiVersion(0) +} + +func (x *MetricsServiceConfig) GetReportCountersAsDeltas() *wrapperspb.BoolValue { + if x != nil { + return x.ReportCountersAsDeltas + } + return nil +} + +func (x *MetricsServiceConfig) GetEmitTagsAsLabels() bool { + if x != nil { + return x.EmitTagsAsLabels + } + return false +} + +func (x *MetricsServiceConfig) GetHistogramEmitMode() HistogramEmitMode { + if x != nil { + return x.HistogramEmitMode + } + return HistogramEmitMode_SUMMARY_AND_HISTOGRAM +} + +var File_envoy_config_metrics_v3_metrics_service_proto protoreflect.FileDescriptor + +var file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x03, 0x0a, 0x14, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x55, 0x0a, 0x19, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, + 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x41, 0x73, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x61, 0x67, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6d, 0x69, 0x74, 0x54, 0x61, 0x67, 0x73, 0x41, 0x73, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x5f, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x11, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, + 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, + 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x02, 0x42, 0x90, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x13, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 
0x63, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_metrics_v3_metrics_service_proto_rawDescOnce sync.Once + file_envoy_config_metrics_v3_metrics_service_proto_rawDescData = file_envoy_config_metrics_v3_metrics_service_proto_rawDesc +) + +func file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP() []byte { + file_envoy_config_metrics_v3_metrics_service_proto_rawDescOnce.Do(func() { + file_envoy_config_metrics_v3_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_metrics_v3_metrics_service_proto_rawDescData) + }) + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescData +} + +var file_envoy_config_metrics_v3_metrics_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_metrics_v3_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_metrics_v3_metrics_service_proto_goTypes = []interface{}{ + (HistogramEmitMode)(0), // 0: envoy.config.metrics.v3.HistogramEmitMode + (*MetricsServiceConfig)(nil), // 1: envoy.config.metrics.v3.MetricsServiceConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (v3.ApiVersion)(0), // 3: envoy.config.core.v3.ApiVersion + (*wrapperspb.BoolValue)(nil), // 4: google.protobuf.BoolValue +} +var file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = []int32{ + 2, // 0: envoy.config.metrics.v3.MetricsServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 3, // 1: envoy.config.metrics.v3.MetricsServiceConfig.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 4, // 2: envoy.config.metrics.v3.MetricsServiceConfig.report_counters_as_deltas:type_name -> google.protobuf.BoolValue + 0, // 3: envoy.config.metrics.v3.MetricsServiceConfig.histogram_emit_mode:type_name -> envoy.config.metrics.v3.HistogramEmitMode + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_metrics_v3_metrics_service_proto_init() } +func file_envoy_config_metrics_v3_metrics_service_proto_init() { + if File_envoy_config_metrics_v3_metrics_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_metrics_v3_metrics_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_metrics_v3_metrics_service_proto_goTypes, + DependencyIndexes: file_envoy_config_metrics_v3_metrics_service_proto_depIdxs, + EnumInfos: file_envoy_config_metrics_v3_metrics_service_proto_enumTypes, + MessageInfos: file_envoy_config_metrics_v3_metrics_service_proto_msgTypes, + }.Build() + File_envoy_config_metrics_v3_metrics_service_proto = out.File + file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = nil + file_envoy_config_metrics_v3_metrics_service_proto_goTypes = nil + file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = nil 
+} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go new file mode 100644 index 000000000..803f930ba --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go @@ -0,0 +1,236 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.ApiVersion(0) +) + +// Validate checks the field values on MetricsServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetricsServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetricsServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetricsServiceConfigMultiError, or nil if none found. +func (m *MetricsServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *MetricsServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := v3.ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := MetricsServiceConfigValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetReportCountersAsDeltas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportCountersAsDeltas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EmitTagsAsLabels + + if _, ok := HistogramEmitMode_name[int32(m.GetHistogramEmitMode())]; !ok { + err := MetricsServiceConfigValidationError{ + field: "HistogramEmitMode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetricsServiceConfigMultiError(errors) + } + + return nil +} + +// MetricsServiceConfigMultiError is an error wrapping multiple validation +// errors returned by MetricsServiceConfig.ValidateAll() if the designated +// constraints aren't met. +type MetricsServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetricsServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetricsServiceConfigMultiError) AllErrors() []error { return m } + +// MetricsServiceConfigValidationError is the validation error returned by +// MetricsServiceConfig.Validate if the designated constraints aren't met. +type MetricsServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetricsServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetricsServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetricsServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetricsServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetricsServiceConfigValidationError) ErrorName() string { + return "MetricsServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e MetricsServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetricsServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetricsServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetricsServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go new file mode 100644 index 000000000..b3f692c45 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go @@ -0,0 +1,139 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetricsServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetricsServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HistogramEmitMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HistogramEmitMode)) + i-- + dAtA[i] = 0x28 + } + if m.EmitTagsAsLabels { + i-- + if m.EmitTagsAsLabels { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x18 + } + if m.ReportCountersAsDeltas != nil { + size, err := (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + 
return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetricsServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportCountersAsDeltas != nil { + l = (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if m.EmitTagsAsLabels { + n += 2 + } + if m.HistogramEmitMode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HistogramEmitMode)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go new file mode 100644 index 000000000..026899002 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go @@ -0,0 +1,1185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for pluggable stats sinks. +type StatsSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. + // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. + // Sinks optionally support tagged/multiple dimensional metrics. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. 
+ // [#extension-category: envoy.stats_sinks] + // + // Types that are assignable to ConfigType: + // + // *StatsSink_TypedConfig + ConfigType isStatsSink_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *StatsSink) Reset() { + *x = StatsSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsSink) ProtoMessage() {} + +func (x *StatsSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsSink.ProtoReflect.Descriptor instead. +func (*StatsSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *StatsSink) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *StatsSink) GetConfigType() isStatsSink_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *StatsSink) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*StatsSink_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isStatsSink_ConfigType interface { + isStatsSink_ConfigType() +} + +type StatsSink_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*StatsSink_TypedConfig) isStatsSink_ConfigType() {} + +// Statistics configuration such as tagging. +type StatsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each stat name is independently processed through these tag specifiers. When a tag is + // matched, the first capture group is not immediately removed from the name, so later + // :ref:`TagSpecifiers ` can also match that + // same portion of the match. After all tag matching is complete, a tag-extracted version of + // the name is produced and is used in stats sinks that represent tags, such as Prometheus. + StatsTags []*TagSpecifier `protobuf:"bytes,1,rep,name=stats_tags,json=statsTags,proto3" json:"stats_tags,omitempty"` + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + UseAllDefaultTags *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=use_all_default_tags,json=useAllDefaultTags,proto3" json:"use_all_default_tags,omitempty"` + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. 
+ StatsMatcher *StatsMatcher `protobuf:"bytes,3,opt,name=stats_matcher,json=statsMatcher,proto3" json:"stats_matcher,omitempty"` + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + HistogramBucketSettings []*HistogramBucketSettings `protobuf:"bytes,4,rep,name=histogram_bucket_settings,json=histogramBucketSettings,proto3" json:"histogram_bucket_settings,omitempty"` +} + +func (x *StatsConfig) Reset() { + *x = StatsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsConfig) ProtoMessage() {} + +func (x *StatsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsConfig.ProtoReflect.Descriptor instead. +func (*StatsConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *StatsConfig) GetStatsTags() []*TagSpecifier { + if x != nil { + return x.StatsTags + } + return nil +} + +func (x *StatsConfig) GetUseAllDefaultTags() *wrapperspb.BoolValue { + if x != nil { + return x.UseAllDefaultTags + } + return nil +} + +func (x *StatsConfig) GetStatsMatcher() *StatsMatcher { + if x != nil { + return x.StatsMatcher + } + return nil +} + +func (x *StatsConfig) GetHistogramBucketSettings() []*HistogramBucketSettings { + if x != nil { + return x.HistogramBucketSettings + } + return nil +} + +// Configuration for disabling stat instantiation. +type StatsMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to StatsMatcher: + // + // *StatsMatcher_RejectAll + // *StatsMatcher_ExclusionList + // *StatsMatcher_InclusionList + StatsMatcher isStatsMatcher_StatsMatcher `protobuf_oneof:"stats_matcher"` +} + +func (x *StatsMatcher) Reset() { + *x = StatsMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsMatcher) ProtoMessage() {} + +func (x *StatsMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsMatcher.ProtoReflect.Descriptor instead. 
+func (*StatsMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{2} +} + +func (m *StatsMatcher) GetStatsMatcher() isStatsMatcher_StatsMatcher { + if m != nil { + return m.StatsMatcher + } + return nil +} + +func (x *StatsMatcher) GetRejectAll() bool { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_RejectAll); ok { + return x.RejectAll + } + return false +} + +func (x *StatsMatcher) GetExclusionList() *v3.ListStringMatcher { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_ExclusionList); ok { + return x.ExclusionList + } + return nil +} + +func (x *StatsMatcher) GetInclusionList() *v3.ListStringMatcher { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_InclusionList); ok { + return x.InclusionList + } + return nil +} + +type isStatsMatcher_StatsMatcher interface { + isStatsMatcher_StatsMatcher() +} + +type StatsMatcher_RejectAll struct { + // If “reject_all“ is true, then all stats are disabled. If “reject_all“ is false, then all + // stats are enabled. + RejectAll bool `protobuf:"varint,1,opt,name=reject_all,json=rejectAll,proto3,oneof"` +} + +type StatsMatcher_ExclusionList struct { + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + ExclusionList *v3.ListStringMatcher `protobuf:"bytes,2,opt,name=exclusion_list,json=exclusionList,proto3,oneof"` +} + +type StatsMatcher_InclusionList struct { + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + InclusionList *v3.ListStringMatcher `protobuf:"bytes,3,opt,name=inclusion_list,json=inclusionList,proto3,oneof"` +} + +func (*StatsMatcher_RejectAll) isStatsMatcher_StatsMatcher() {} + +func (*StatsMatcher_ExclusionList) isStatsMatcher_StatsMatcher() {} + +func (*StatsMatcher_InclusionList) isStatsMatcher_StatsMatcher() {} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +type TagSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // A stat name may be spelled in such a way that it matches two different + // tag extractors for the same tag name. In that case, all but one of the + // tag values will be dropped. It is not specified which tag value will be + // retained. The extraction will only occur for one of the extractors, and + // only the matched extraction will be removed from the tag name. 
+ TagName string `protobuf:"bytes,1,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` + // Types that are assignable to TagValue: + // + // *TagSpecifier_Regex + // *TagSpecifier_FixedValue + TagValue isTagSpecifier_TagValue `protobuf_oneof:"tag_value"` +} + +func (x *TagSpecifier) Reset() { + *x = TagSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TagSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TagSpecifier) ProtoMessage() {} + +func (x *TagSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TagSpecifier.ProtoReflect.Descriptor instead. +func (*TagSpecifier) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{3} +} + +func (x *TagSpecifier) GetTagName() string { + if x != nil { + return x.TagName + } + return "" +} + +func (m *TagSpecifier) GetTagValue() isTagSpecifier_TagValue { + if m != nil { + return m.TagValue + } + return nil +} + +func (x *TagSpecifier) GetRegex() string { + if x, ok := x.GetTagValue().(*TagSpecifier_Regex); ok { + return x.Regex + } + return "" +} + +func (x *TagSpecifier) GetFixedValue() string { + if x, ok := x.GetTagValue().(*TagSpecifier_FixedValue); ok { + return x.FixedValue + } + return "" +} + +type isTagSpecifier_TagValue interface { + isTagSpecifier_TagValue() +} + +type TagSpecifier_Regex struct { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name “cluster.foo_cluster.upstream_rq_timeout“ and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\\.((.+?)\\.)" + // } + // + // Note that the regex will remove “foo_cluster.“ making the tag extracted + // name “cluster.upstream_rq_timeout“ and the tag value for + // “envoy.cluster_name“ will be “foo_cluster“ (note: there will be no + // “.“ character because of the second capture group). + // + // Example 2. a stat name + // “http.connection_manager_1.user_agent.ios.downstream_cx_total“ and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\\.((.*?)\\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed from the elaborated + // stat name. + // + // The first regex will save “ios.“ as the tag value for “envoy.http_user_agent“. 
It will + // leave it in the name for potential matching with additional tag specifiers. After all tag + // specifiers are processed the tags will be removed from the name. + // + // The second regex will populate tag “envoy.http_conn_manager_prefix“ with value + // “connection_manager_1.“, based on the original stat name. + // + // As a final step, the matched tags are removed, leaving + // “http.user_agent.downstream_cx_total“ as the tag extracted name. + Regex string `protobuf:"bytes,2,opt,name=regex,proto3,oneof"` +} + +type TagSpecifier_FixedValue struct { + // Specifies a fixed tag value for the “tag_name“. + FixedValue string `protobuf:"bytes,3,opt,name=fixed_value,json=fixedValue,proto3,oneof"` +} + +func (*TagSpecifier_Regex) isTagSpecifier_TagValue() {} + +func (*TagSpecifier_FixedValue) isTagSpecifier_TagValue() {} + +// Specifies a matcher for stats and the buckets that matching stats should use. +type HistogramBucketSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example “cluster.exampleclustername.upstream_cx_length_ms“. + Match *v3.StringMatcher `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + Buckets []float64 `protobuf:"fixed64,2,rep,packed,name=buckets,proto3" json:"buckets,omitempty"` +} + +func (x *HistogramBucketSettings) Reset() { + *x = HistogramBucketSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HistogramBucketSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HistogramBucketSettings) ProtoMessage() {} + +func (x *HistogramBucketSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistogramBucketSettings.ProtoReflect.Descriptor instead. +func (*HistogramBucketSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{4} +} + +func (x *HistogramBucketSettings) GetMatch() *v3.StringMatcher { + if x != nil { + return x.Match + } + return nil +} + +func (x *HistogramBucketSettings) GetBuckets() []float64 { + if x != nil { + return x.Buckets + } + return nil +} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.statsd“ sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +type StatsdSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to StatsdSpecifier: + // + // *StatsdSink_Address + // *StatsdSink_TcpClusterName + StatsdSpecifier isStatsdSink_StatsdSpecifier `protobuf_oneof:"statsd_specifier"` + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. 
code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (x *StatsdSink) Reset() { + *x = StatsdSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsdSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsdSink) ProtoMessage() {} + +func (x *StatsdSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsdSink.ProtoReflect.Descriptor instead. +func (*StatsdSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{5} +} + +func (m *StatsdSink) GetStatsdSpecifier() isStatsdSink_StatsdSpecifier { + if m != nil { + return m.StatsdSpecifier + } + return nil +} + +func (x *StatsdSink) GetAddress() *v31.Address { + if x, ok := x.GetStatsdSpecifier().(*StatsdSink_Address); ok { + return x.Address + } + return nil +} + +func (x *StatsdSink) GetTcpClusterName() string { + if x, ok := x.GetStatsdSpecifier().(*StatsdSink_TcpClusterName); ok { + return x.TcpClusterName + } + return "" +} + +func (x *StatsdSink) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +type isStatsdSink_StatsdSpecifier interface { + isStatsdSink_StatsdSpecifier() +} + +type StatsdSink_Address struct { + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + Address *v31.Address `protobuf:"bytes,1,opt,name=address,proto3,oneof"` +} + +type StatsdSink_TcpClusterName struct { + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + TcpClusterName string `protobuf:"bytes,2,opt,name=tcp_cluster_name,json=tcpClusterName,proto3,oneof"` +} + +func (*StatsdSink_Address) isStatsdSink_StatsdSpecifier() {} + +func (*StatsdSink_TcpClusterName) isStatsdSink_StatsdSpecifier() {} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.dog_statsd“ sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +type DogStatsdSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to DogStatsdSpecifier: + // + // *DogStatsdSink_Address + DogStatsdSpecifier isDogStatsdSink_DogStatsdSpecifier `protobuf_oneof:"dog_statsd_specifier"` + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. 
By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + MaxBytesPerDatagram *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=max_bytes_per_datagram,json=maxBytesPerDatagram,proto3" json:"max_bytes_per_datagram,omitempty"` +} + +func (x *DogStatsdSink) Reset() { + *x = DogStatsdSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DogStatsdSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DogStatsdSink) ProtoMessage() {} + +func (x *DogStatsdSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DogStatsdSink.ProtoReflect.Descriptor instead. +func (*DogStatsdSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{6} +} + +func (m *DogStatsdSink) GetDogStatsdSpecifier() isDogStatsdSink_DogStatsdSpecifier { + if m != nil { + return m.DogStatsdSpecifier + } + return nil +} + +func (x *DogStatsdSink) GetAddress() *v31.Address { + if x, ok := x.GetDogStatsdSpecifier().(*DogStatsdSink_Address); ok { + return x.Address + } + return nil +} + +func (x *DogStatsdSink) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *DogStatsdSink) GetMaxBytesPerDatagram() *wrapperspb.UInt64Value { + if x != nil { + return x.MaxBytesPerDatagram + } + return nil +} + +type isDogStatsdSink_DogStatsdSpecifier interface { + isDogStatsdSink_DogStatsdSpecifier() +} + +type DogStatsdSink_Address struct { + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + Address *v31.Address `protobuf:"bytes,1,opt,name=address,proto3,oneof"` +} + +func (*DogStatsdSink_Address) isDogStatsdSink_DogStatsdSpecifier() {} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.hystrix“ sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] +type HystrixSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // “rolling_window(ms)“ = “stats_flush_interval(ms)“ * “num_of_buckets“ + // + // More detailed explanation can be found in `Hystrix wiki + // `_. 
+ NumBuckets int64 `protobuf:"varint,1,opt,name=num_buckets,json=numBuckets,proto3" json:"num_buckets,omitempty"` +} + +func (x *HystrixSink) Reset() { + *x = HystrixSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HystrixSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HystrixSink) ProtoMessage() {} + +func (x *HystrixSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HystrixSink.ProtoReflect.Descriptor instead. +func (*HystrixSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{7} +} + +func (x *HystrixSink) GetNumBuckets() int64 { + if x != nil { + return x.NumBuckets + } + return 0 +} + +var File_envoy_config_metrics_v3_stats_proto protoreflect.FileDescriptor + +var file_envoy_config_metrics_v3_stats_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x01, 0x0a, + 0x09, 0x53, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, + 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x53, + 0x69, 0x6e, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x86, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x44, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, + 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x54, 0x61, 0x67, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x11, 0x75, 0x73, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, + 0x61, 0x67, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x6c, 0x0a, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x17, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3a, 0x2a, 0x9a, + 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x98, 0x02, 0x0a, 0x0c, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0a, 0x72, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x09, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x6c, 0x12, 0x51, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 
0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x51, + 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, + 0x74, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa8, 0x01, 0x0a, 0x0c, 0x54, 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x28, 0x80, 0x08, 0x48, 0x00, 0x52, 0x05, 0x72, 0x65, 0x67, + 0x65, 0x78, 0x12, 0x21, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x74, 0x61, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x92, 0x01, 0x0a, 0x17, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x31, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x92, 0x01, 0x11, 0x08, 0x01, 0x18, 0x01, 0x22, 0x0b, + 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x64, 0x53, + 0x69, 0x6e, 0x6b, 0x12, 0x39, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, + 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 
0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x17, 0x0a, + 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x8f, 0x02, 0x0a, 0x0d, 0x44, 0x6f, 0x67, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x39, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5a, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, + 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x1b, 0x0a, 0x14, 0x64, 0x6f, 0x67, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x5a, 0x0a, 0x0b, 0x48, 0x79, 0x73, 0x74, + 0x72, 0x69, 0x78, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6e, 0x75, + 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x79, 0x73, 0x74, 0x72, 0x69, 0x78, + 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x87, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 
0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_metrics_v3_stats_proto_rawDescOnce sync.Once + file_envoy_config_metrics_v3_stats_proto_rawDescData = file_envoy_config_metrics_v3_stats_proto_rawDesc +) + +func file_envoy_config_metrics_v3_stats_proto_rawDescGZIP() []byte { + file_envoy_config_metrics_v3_stats_proto_rawDescOnce.Do(func() { + file_envoy_config_metrics_v3_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_metrics_v3_stats_proto_rawDescData) + }) + return file_envoy_config_metrics_v3_stats_proto_rawDescData +} + +var file_envoy_config_metrics_v3_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_config_metrics_v3_stats_proto_goTypes = []interface{}{ + (*StatsSink)(nil), // 0: envoy.config.metrics.v3.StatsSink + (*StatsConfig)(nil), // 1: envoy.config.metrics.v3.StatsConfig + (*StatsMatcher)(nil), // 2: envoy.config.metrics.v3.StatsMatcher + (*TagSpecifier)(nil), // 3: envoy.config.metrics.v3.TagSpecifier + (*HistogramBucketSettings)(nil), // 4: envoy.config.metrics.v3.HistogramBucketSettings + (*StatsdSink)(nil), // 5: envoy.config.metrics.v3.StatsdSink + (*DogStatsdSink)(nil), // 6: envoy.config.metrics.v3.DogStatsdSink + (*HystrixSink)(nil), // 7: envoy.config.metrics.v3.HystrixSink + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*v3.ListStringMatcher)(nil), // 10: envoy.type.matcher.v3.ListStringMatcher + (*v3.StringMatcher)(nil), // 11: envoy.type.matcher.v3.StringMatcher + (*v31.Address)(nil), // 12: envoy.config.core.v3.Address + (*wrapperspb.UInt64Value)(nil), // 13: google.protobuf.UInt64Value +} +var file_envoy_config_metrics_v3_stats_proto_depIdxs = []int32{ + 8, // 0: envoy.config.metrics.v3.StatsSink.typed_config:type_name -> google.protobuf.Any + 3, // 1: envoy.config.metrics.v3.StatsConfig.stats_tags:type_name -> envoy.config.metrics.v3.TagSpecifier + 9, // 2: envoy.config.metrics.v3.StatsConfig.use_all_default_tags:type_name -> google.protobuf.BoolValue + 2, // 3: envoy.config.metrics.v3.StatsConfig.stats_matcher:type_name -> envoy.config.metrics.v3.StatsMatcher + 4, // 4: envoy.config.metrics.v3.StatsConfig.histogram_bucket_settings:type_name -> envoy.config.metrics.v3.HistogramBucketSettings + 10, // 5: envoy.config.metrics.v3.StatsMatcher.exclusion_list:type_name -> envoy.type.matcher.v3.ListStringMatcher + 10, // 6: envoy.config.metrics.v3.StatsMatcher.inclusion_list:type_name -> envoy.type.matcher.v3.ListStringMatcher + 11, // 7: envoy.config.metrics.v3.HistogramBucketSettings.match:type_name -> envoy.type.matcher.v3.StringMatcher + 12, // 8: envoy.config.metrics.v3.StatsdSink.address:type_name -> envoy.config.core.v3.Address + 12, // 9: envoy.config.metrics.v3.DogStatsdSink.address:type_name -> envoy.config.core.v3.Address + 13, // 10: envoy.config.metrics.v3.DogStatsdSink.max_bytes_per_datagram:type_name -> google.protobuf.UInt64Value + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { 
file_envoy_config_metrics_v3_stats_proto_init() } +func file_envoy_config_metrics_v3_stats_proto_init() { + if File_envoy_config_metrics_v3_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_metrics_v3_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TagSpecifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HistogramBucketSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsdSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DogStatsdSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HystrixSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StatsSink_TypedConfig)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*StatsMatcher_RejectAll)(nil), + (*StatsMatcher_ExclusionList)(nil), + (*StatsMatcher_InclusionList)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*TagSpecifier_Regex)(nil), + (*TagSpecifier_FixedValue)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*StatsdSink_Address)(nil), + (*StatsdSink_TcpClusterName)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*DogStatsdSink_Address)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_metrics_v3_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_metrics_v3_stats_proto_goTypes, + DependencyIndexes: file_envoy_config_metrics_v3_stats_proto_depIdxs, + 
MessageInfos: file_envoy_config_metrics_v3_stats_proto_msgTypes, + }.Build() + File_envoy_config_metrics_v3_stats_proto = out.File + file_envoy_config_metrics_v3_stats_proto_rawDesc = nil + file_envoy_config_metrics_v3_stats_proto_goTypes = nil + file_envoy_config_metrics_v3_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go new file mode 100644 index 000000000..60e9fdaaf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go @@ -0,0 +1,1394 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StatsSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsSinkMultiError, or nil +// if none found. +func (m *StatsSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *StatsSink_TypedConfig: + if v == nil { + err := StatsSinkValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return StatsSinkMultiError(errors) + } + + return nil +} + +// StatsSinkMultiError is an error wrapping multiple validation errors returned +// by StatsSink.ValidateAll() if the designated constraints aren't met. +type StatsSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m StatsSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsSinkMultiError) AllErrors() []error { return m } + +// StatsSinkValidationError is the validation error returned by +// StatsSink.Validate if the designated constraints aren't met. +type StatsSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsSinkValidationError) ErrorName() string { return "StatsSinkValidationError" } + +// Error satisfies the builtin error interface +func (e StatsSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsSinkValidationError{} + +// Validate checks the field values on StatsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsConfigMultiError, or +// nil if none found. 
+func (m *StatsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatsTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUseAllDefaultTags()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseAllDefaultTags()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStatsMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHistogramBucketSettings() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: 
fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return StatsConfigMultiError(errors) + } + + return nil +} + +// StatsConfigMultiError is an error wrapping multiple validation errors +// returned by StatsConfig.ValidateAll() if the designated constraints aren't met. +type StatsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsConfigMultiError) AllErrors() []error { return m } + +// StatsConfigValidationError is the validation error returned by +// StatsConfig.Validate if the designated constraints aren't met. +type StatsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsConfigValidationError) ErrorName() string { return "StatsConfigValidationError" } + +// Error satisfies the builtin error interface +func (e StatsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsConfigValidationError{} + +// Validate checks the field values on StatsMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsMatcherMultiError, or +// nil if none found. 
+func (m *StatsMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofStatsMatcherPresent := false + switch v := m.StatsMatcher.(type) { + case *StatsMatcher_RejectAll: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + // no validation rules for RejectAll + case *StatsMatcher_ExclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + + if all { + switch v := interface{}(m.GetExclusionList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExclusionList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StatsMatcher_InclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + + if all { + switch v := interface{}(m.GetInclusionList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInclusionList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofStatsMatcherPresent { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StatsMatcherMultiError(errors) + } + + return nil +} + +// StatsMatcherMultiError is an error wrapping multiple validation errors +// returned by StatsMatcher.ValidateAll() if the designated constraints aren't met. +type StatsMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m StatsMatcherMultiError) AllErrors() []error { return m } + +// StatsMatcherValidationError is the validation error returned by +// StatsMatcher.Validate if the designated constraints aren't met. +type StatsMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsMatcherValidationError) ErrorName() string { return "StatsMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StatsMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsMatcherValidationError{} + +// Validate checks the field values on TagSpecifier with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TagSpecifier) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TagSpecifier with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TagSpecifierMultiError, or +// nil if none found. +func (m *TagSpecifier) ValidateAll() error { + return m.validate(true) +} + +func (m *TagSpecifier) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TagName + + switch v := m.TagValue.(type) { + case *TagSpecifier_Regex: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetRegex()) > 1024 { + err := TagSpecifierValidationError{ + field: "Regex", + reason: "value length must be at most 1024 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *TagSpecifier_FixedValue: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for FixedValue + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TagSpecifierMultiError(errors) + } + + return nil +} + +// TagSpecifierMultiError is an error wrapping multiple validation errors +// returned by TagSpecifier.ValidateAll() if the designated constraints aren't met. +type TagSpecifierMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TagSpecifierMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TagSpecifierMultiError) AllErrors() []error { return m } + +// TagSpecifierValidationError is the validation error returned by +// TagSpecifier.Validate if the designated constraints aren't met. +type TagSpecifierValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TagSpecifierValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TagSpecifierValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TagSpecifierValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TagSpecifierValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TagSpecifierValidationError) ErrorName() string { return "TagSpecifierValidationError" } + +// Error satisfies the builtin error interface +func (e TagSpecifierValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTagSpecifier.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TagSpecifierValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TagSpecifierValidationError{} + +// Validate checks the field values on HistogramBucketSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HistogramBucketSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HistogramBucketSettings with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HistogramBucketSettingsMultiError, or nil if none found. 
+func (m *HistogramBucketSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *HistogramBucketSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMatch() == nil { + err := HistogramBucketSettingsValidationError{ + field: "Match", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetBuckets()) < 1 { + err := HistogramBucketSettingsValidationError{ + field: "Buckets", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + _HistogramBucketSettings_Buckets_Unique := make(map[float64]struct{}, len(m.GetBuckets())) + + for idx, item := range m.GetBuckets() { + _, _ = idx, item + + if _, exists := _HistogramBucketSettings_Buckets_Unique[item]; exists { + err := HistogramBucketSettingsValidationError{ + field: fmt.Sprintf("Buckets[%v]", idx), + reason: "repeated value must contain unique items", + } + if !all { + return err + } + errors = append(errors, err) + } else { + _HistogramBucketSettings_Buckets_Unique[item] = struct{}{} + } + + if item <= 0 { + err := HistogramBucketSettingsValidationError{ + field: fmt.Sprintf("Buckets[%v]", idx), + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return HistogramBucketSettingsMultiError(errors) + } + + return nil +} + +// HistogramBucketSettingsMultiError is an error wrapping multiple validation +// errors returned by HistogramBucketSettings.ValidateAll() if the designated +// constraints aren't met. +type HistogramBucketSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HistogramBucketSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HistogramBucketSettingsMultiError) AllErrors() []error { return m } + +// HistogramBucketSettingsValidationError is the validation error returned by +// HistogramBucketSettings.Validate if the designated constraints aren't met. +type HistogramBucketSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HistogramBucketSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HistogramBucketSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HistogramBucketSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HistogramBucketSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HistogramBucketSettingsValidationError) ErrorName() string { + return "HistogramBucketSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e HistogramBucketSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHistogramBucketSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HistogramBucketSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HistogramBucketSettingsValidationError{} + +// Validate checks the field values on StatsdSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsdSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsdSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsdSinkMultiError, or +// nil if none found. +func (m *StatsdSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsdSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Prefix + + oneofStatsdSpecifierPresent := false + switch v := m.StatsdSpecifier.(type) { + case *StatsdSink_Address: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StatsdSink_TcpClusterName: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true + // no validation rules for TcpClusterName + default: + _ = v // ensures v is used + } + if !oneofStatsdSpecifierPresent { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StatsdSinkMultiError(errors) + } + + return nil +} + +// StatsdSinkMultiError is an error wrapping multiple validation errors +// returned 
by StatsdSink.ValidateAll() if the designated constraints aren't met. +type StatsdSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsdSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsdSinkMultiError) AllErrors() []error { return m } + +// StatsdSinkValidationError is the validation error returned by +// StatsdSink.Validate if the designated constraints aren't met. +type StatsdSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsdSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsdSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsdSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsdSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsdSinkValidationError) ErrorName() string { return "StatsdSinkValidationError" } + +// Error satisfies the builtin error interface +func (e StatsdSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsdSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsdSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsdSinkValidationError{} + +// Validate checks the field values on DogStatsdSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DogStatsdSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DogStatsdSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DogStatsdSinkMultiError, or +// nil if none found. 
+func (m *DogStatsdSink) ValidateAll() error { + return m.validate(true) +} + +func (m *DogStatsdSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Prefix + + if wrapper := m.GetMaxBytesPerDatagram(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := DogStatsdSinkValidationError{ + field: "MaxBytesPerDatagram", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + oneofDogStatsdSpecifierPresent := false + switch v := m.DogStatsdSpecifier.(type) { + case *DogStatsdSink_Address: + if v == nil { + err := DogStatsdSinkValidationError{ + field: "DogStatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDogStatsdSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofDogStatsdSpecifierPresent { + err := DogStatsdSinkValidationError{ + field: "DogStatsdSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DogStatsdSinkMultiError(errors) + } + + return nil +} + +// DogStatsdSinkMultiError is an error wrapping multiple validation errors +// returned by DogStatsdSink.ValidateAll() if the designated constraints +// aren't met. +type DogStatsdSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DogStatsdSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DogStatsdSinkMultiError) AllErrors() []error { return m } + +// DogStatsdSinkValidationError is the validation error returned by +// DogStatsdSink.Validate if the designated constraints aren't met. +type DogStatsdSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DogStatsdSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DogStatsdSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DogStatsdSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DogStatsdSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DogStatsdSinkValidationError) ErrorName() string { return "DogStatsdSinkValidationError" } + +// Error satisfies the builtin error interface +func (e DogStatsdSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDogStatsdSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DogStatsdSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DogStatsdSinkValidationError{} + +// Validate checks the field values on HystrixSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HystrixSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HystrixSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HystrixSinkMultiError, or +// nil if none found. +func (m *HystrixSink) ValidateAll() error { + return m.validate(true) +} + +func (m *HystrixSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumBuckets + + if len(errors) > 0 { + return HystrixSinkMultiError(errors) + } + + return nil +} + +// HystrixSinkMultiError is an error wrapping multiple validation errors +// returned by HystrixSink.ValidateAll() if the designated constraints aren't met. +type HystrixSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HystrixSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HystrixSinkMultiError) AllErrors() []error { return m } + +// HystrixSinkValidationError is the validation error returned by +// HystrixSink.Validate if the designated constraints aren't met. +type HystrixSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HystrixSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HystrixSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HystrixSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HystrixSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HystrixSinkValidationError) ErrorName() string { return "HystrixSinkValidationError" } + +// Error satisfies the builtin error interface +func (e HystrixSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHystrixSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HystrixSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HystrixSinkValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go new file mode 100644 index 000000000..3f53827ea --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go @@ -0,0 +1,976 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*StatsSink_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsSink_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *StatsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + 
size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HistogramBucketSettings) > 0 { + for iNdEx := len(m.HistogramBucketSettings) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HistogramBucketSettings[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.StatsMatcher != nil { + size, err := m.StatsMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.UseAllDefaultTags != nil { + size, err := (*wrapperspb.BoolValue)(m.UseAllDefaultTags).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.StatsTags) > 0 { + for iNdEx := len(m.StatsTags) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StatsTags[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatsMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_InclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_ExclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_RejectAll); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsMatcher_RejectAll) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_RejectAll) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.RejectAll { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *StatsMatcher_ExclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*StatsMatcher_ExclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExclusionList != nil { + if vtmsg, ok := interface{}(m.ExclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *StatsMatcher_InclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_InclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InclusionList != nil { + if vtmsg, ok := interface{}(m.InclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.InclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TagSpecifier) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagSpecifier) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TagValue.(*TagSpecifier_FixedValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TagValue.(*TagSpecifier_Regex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TagName) > 0 { + i -= len(m.TagName) + copy(dAtA[i:], m.TagName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TagName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TagSpecifier_Regex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_Regex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TagSpecifier_FixedValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_FixedValue) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.FixedValue) + copy(dAtA[i:], m.FixedValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FixedValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HistogramBucketSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramBucketSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HistogramBucketSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Buckets) > 0 { + for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.Buckets[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Buckets)*8)) + i-- + dAtA[i] = 0x12 + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_TcpClusterName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *StatsdSink_TcpClusterName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_TcpClusterName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TcpClusterName) + copy(dAtA[i:], m.TcpClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TcpClusterName))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DogStatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DogStatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxBytesPerDatagram != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.DogStatsdSpecifier.(*DogStatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DogStatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HystrixSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HystrixSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
+func (m *HystrixSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NumBuckets != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumBuckets)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatsSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsSink_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StatsTags) > 0 { + for _, e := range m.StatsTags { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseAllDefaultTags != nil { + l = (*wrapperspb.BoolValue)(m.UseAllDefaultTags).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsMatcher != nil { + l = m.StatsMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HistogramBucketSettings) > 0 { + for _, e := range m.HistogramBucketSettings { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsMatcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher_RejectAll) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *StatsMatcher_ExclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExclusionList != nil { + if size, ok := interface{}(m.ExclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsMatcher_InclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InclusionList != nil { + if size, ok := interface{}(m.InclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.InclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TagSpecifier) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TagName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TagValue.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TagSpecifier_Regex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Regex) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TagSpecifier_FixedValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FixedValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HistogramBucketSettings) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Buckets) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.Buckets)*8)) + len(m.Buckets)*8 + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink_Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsdSink_TcpClusterName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TcpClusterName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DogStatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.DogStatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBytesPerDatagram != nil { + l = (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DogStatsdSink_Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HystrixSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumBuckets != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumBuckets)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go new file mode 100644 index 000000000..6feac9129 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go @@ -0,0 +1,1186 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScaleTimersOverloadActionConfig_TimerType int32 + +const ( + // Unsupported value; users must explicitly specify the timer they want scaled. + ScaleTimersOverloadActionConfig_UNSPECIFIED ScaleTimersOverloadActionConfig_TimerType = 0 + // Adjusts the idle timer for downstream HTTP connections that takes effect when there are no active streams. + // This affects the value of :ref:`HttpConnectionManager.common_http_protocol_options.idle_timeout + // ` + ScaleTimersOverloadActionConfig_HTTP_DOWNSTREAM_CONNECTION_IDLE ScaleTimersOverloadActionConfig_TimerType = 1 + // Adjusts the idle timer for HTTP streams initiated by downstream clients. + // This affects the value of :ref:`RouteAction.idle_timeout ` and + // :ref:`HttpConnectionManager.stream_idle_timeout + // ` + ScaleTimersOverloadActionConfig_HTTP_DOWNSTREAM_STREAM_IDLE ScaleTimersOverloadActionConfig_TimerType = 2 + // Adjusts the timer for how long downstream clients have to finish transport-level negotiations + // before the connection is closed. + // This affects the value of + // :ref:`FilterChain.transport_socket_connect_timeout `. + ScaleTimersOverloadActionConfig_TRANSPORT_SOCKET_CONNECT ScaleTimersOverloadActionConfig_TimerType = 3 +) + +// Enum value maps for ScaleTimersOverloadActionConfig_TimerType. +var ( + ScaleTimersOverloadActionConfig_TimerType_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "HTTP_DOWNSTREAM_CONNECTION_IDLE", + 2: "HTTP_DOWNSTREAM_STREAM_IDLE", + 3: "TRANSPORT_SOCKET_CONNECT", + } + ScaleTimersOverloadActionConfig_TimerType_value = map[string]int32{ + "UNSPECIFIED": 0, + "HTTP_DOWNSTREAM_CONNECTION_IDLE": 1, + "HTTP_DOWNSTREAM_STREAM_IDLE": 2, + "TRANSPORT_SOCKET_CONNECT": 3, + } +) + +func (x ScaleTimersOverloadActionConfig_TimerType) Enum() *ScaleTimersOverloadActionConfig_TimerType { + p := new(ScaleTimersOverloadActionConfig_TimerType) + *p = x + return p +} + +func (x ScaleTimersOverloadActionConfig_TimerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ScaleTimersOverloadActionConfig_TimerType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_overload_v3_overload_proto_enumTypes[0].Descriptor() +} + +func (ScaleTimersOverloadActionConfig_TimerType) Type() protoreflect.EnumType { + return &file_envoy_config_overload_v3_overload_proto_enumTypes[0] +} + +func (x ScaleTimersOverloadActionConfig_TimerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig_TimerType.Descriptor instead. +func (ScaleTimersOverloadActionConfig_TimerType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4, 0} +} + +type ResourceMonitor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource monitor to instantiate. Must match a registered + // resource monitor type. + // See the :ref:`extensions listed in typed_config below ` for the default list of available resource monitor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Configuration for the resource monitor being instantiated. 
+ // [#extension-category: envoy.resource_monitors] + // + // Types that are assignable to ConfigType: + // + // *ResourceMonitor_TypedConfig + ConfigType isResourceMonitor_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *ResourceMonitor) Reset() { + *x = ResourceMonitor{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceMonitor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceMonitor) ProtoMessage() {} + +func (x *ResourceMonitor) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceMonitor.ProtoReflect.Descriptor instead. +func (*ResourceMonitor) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceMonitor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ResourceMonitor) GetConfigType() isResourceMonitor_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ResourceMonitor) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ResourceMonitor_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isResourceMonitor_ConfigType interface { + isResourceMonitor_ConfigType() +} + +type ResourceMonitor_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*ResourceMonitor_TypedConfig) isResourceMonitor_ConfigType() {} + +type ThresholdTrigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the resource pressure is greater than or equal to this value, the trigger + // will enter saturation. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ThresholdTrigger) Reset() { + *x = ThresholdTrigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ThresholdTrigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThresholdTrigger) ProtoMessage() {} + +func (x *ThresholdTrigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThresholdTrigger.ProtoReflect.Descriptor instead. 
+func (*ThresholdTrigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{1} +} + +func (x *ThresholdTrigger) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +type ScaledTrigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the resource pressure is greater than this value, the trigger will be in the + // :ref:`scaling ` state with value + // “(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)“. + ScalingThreshold float64 `protobuf:"fixed64,1,opt,name=scaling_threshold,json=scalingThreshold,proto3" json:"scaling_threshold,omitempty"` + // If the resource pressure is greater than this value, the trigger will enter saturation. + SaturationThreshold float64 `protobuf:"fixed64,2,opt,name=saturation_threshold,json=saturationThreshold,proto3" json:"saturation_threshold,omitempty"` +} + +func (x *ScaledTrigger) Reset() { + *x = ScaledTrigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaledTrigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaledTrigger) ProtoMessage() {} + +func (x *ScaledTrigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaledTrigger.ProtoReflect.Descriptor instead. +func (*ScaledTrigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{2} +} + +func (x *ScaledTrigger) GetScalingThreshold() float64 { + if x != nil { + return x.ScalingThreshold + } + return 0 +} + +func (x *ScaledTrigger) GetSaturationThreshold() float64 { + if x != nil { + return x.SaturationThreshold + } + return 0 +} + +type Trigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource this is a trigger for. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to TriggerOneof: + // + // *Trigger_Threshold + // *Trigger_Scaled + TriggerOneof isTrigger_TriggerOneof `protobuf_oneof:"trigger_oneof"` +} + +func (x *Trigger) Reset() { + *x = Trigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trigger) ProtoMessage() {} + +func (x *Trigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trigger.ProtoReflect.Descriptor instead. 
+func (*Trigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{3} +} + +func (x *Trigger) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Trigger) GetTriggerOneof() isTrigger_TriggerOneof { + if m != nil { + return m.TriggerOneof + } + return nil +} + +func (x *Trigger) GetThreshold() *ThresholdTrigger { + if x, ok := x.GetTriggerOneof().(*Trigger_Threshold); ok { + return x.Threshold + } + return nil +} + +func (x *Trigger) GetScaled() *ScaledTrigger { + if x, ok := x.GetTriggerOneof().(*Trigger_Scaled); ok { + return x.Scaled + } + return nil +} + +type isTrigger_TriggerOneof interface { + isTrigger_TriggerOneof() +} + +type Trigger_Threshold struct { + Threshold *ThresholdTrigger `protobuf:"bytes,2,opt,name=threshold,proto3,oneof"` +} + +type Trigger_Scaled struct { + Scaled *ScaledTrigger `protobuf:"bytes,3,opt,name=scaled,proto3,oneof"` +} + +func (*Trigger_Threshold) isTrigger_TriggerOneof() {} + +func (*Trigger_Scaled) isTrigger_TriggerOneof() {} + +// Typed configuration for the "envoy.overload_actions.reduce_timeouts" action. See +// :ref:`the docs ` for an example of how to configure +// the action with different timeouts and minimum values. +type ScaleTimersOverloadActionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of timer scaling rules to be applied. + TimerScaleFactors []*ScaleTimersOverloadActionConfig_ScaleTimer `protobuf:"bytes,1,rep,name=timer_scale_factors,json=timerScaleFactors,proto3" json:"timer_scale_factors,omitempty"` +} + +func (x *ScaleTimersOverloadActionConfig) Reset() { + *x = ScaleTimersOverloadActionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaleTimersOverloadActionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleTimersOverloadActionConfig) ProtoMessage() {} + +func (x *ScaleTimersOverloadActionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig.ProtoReflect.Descriptor instead. +func (*ScaleTimersOverloadActionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4} +} + +func (x *ScaleTimersOverloadActionConfig) GetTimerScaleFactors() []*ScaleTimersOverloadActionConfig_ScaleTimer { + if x != nil { + return x.TimerScaleFactors + } + return nil +} + +type OverloadAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the overload action. This is just a well-known string that listeners can + // use for registering callbacks. Custom overload actions should be named using reverse + // DNS to ensure uniqueness. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of triggers for this action. The state of the action is the maximum + // state of all triggers, which can be scalar values between 0 and 1 or + // saturated. Listeners are notified when the overload action changes state. 
+ // An overload manager action can only have one trigger for a given resource + // e.g. :ref:`Trigger.name + // ` must be unique + // in this list. + Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` + // Configuration for the action being instantiated. + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *OverloadAction) Reset() { + *x = OverloadAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OverloadAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OverloadAction) ProtoMessage() {} + +func (x *OverloadAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OverloadAction.ProtoReflect.Descriptor instead. +func (*OverloadAction) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{5} +} + +func (x *OverloadAction) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *OverloadAction) GetTriggers() []*Trigger { + if x != nil { + return x.Triggers + } + return nil +} + +func (x *OverloadAction) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// A point within the connection or request lifecycle that provides context on +// whether to shed load at that given stage for the current entity at the +// point. +type LoadShedPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is just a well-known string for the LoadShedPoint. + // Deployment specific LoadShedPoints e.g. within a custom extension should + // be prefixed by the company / deployment name to avoid colliding with any + // open source LoadShedPoints. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of triggers for this LoadShedPoint. The LoadShedPoint will use the + // the maximum state of all triggers, which can be scalar values between 0 and + // 1 or saturated. A LoadShedPoint can only have one trigger for a given + // resource e.g. :ref:`Trigger.name + // ` must be unique in + // this list. + Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` +} + +func (x *LoadShedPoint) Reset() { + *x = LoadShedPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadShedPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadShedPoint) ProtoMessage() {} + +func (x *LoadShedPoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadShedPoint.ProtoReflect.Descriptor instead. 
+func (*LoadShedPoint) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{6} +} + +func (x *LoadShedPoint) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *LoadShedPoint) GetTriggers() []*Trigger { + if x != nil { + return x.Triggers + } + return nil +} + +// Configuration for which accounts the WatermarkBuffer Factories should +// track. +type BufferFactoryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum power of two at which Envoy starts tracking an account. + // + // Envoy has 8 power of two buckets starting with the provided exponent below. + // Concretely the 1st bucket contains accounts for streams that use + // [2^minimum_account_to_track_power_of_two, + // 2^(minimum_account_to_track_power_of_two + 1)) bytes. + // With the 8th bucket tracking accounts + // >= 128 * 2^minimum_account_to_track_power_of_two. + // + // The maximum value is 56, since we're using uint64_t for bytes counting, + // and that's the last value that would use the 8 buckets. In practice, + // we don't expect the proxy to be holding 2^56 bytes. + // + // If omitted, Envoy should not do any tracking. + MinimumAccountToTrackPowerOfTwo uint32 `protobuf:"varint,1,opt,name=minimum_account_to_track_power_of_two,json=minimumAccountToTrackPowerOfTwo,proto3" json:"minimum_account_to_track_power_of_two,omitempty"` +} + +func (x *BufferFactoryConfig) Reset() { + *x = BufferFactoryConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BufferFactoryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BufferFactoryConfig) ProtoMessage() {} + +func (x *BufferFactoryConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BufferFactoryConfig.ProtoReflect.Descriptor instead. +func (*BufferFactoryConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{7} +} + +func (x *BufferFactoryConfig) GetMinimumAccountToTrackPowerOfTwo() uint32 { + if x != nil { + return x.MinimumAccountToTrackPowerOfTwo + } + return 0 +} + +// [#next-free-field: 6] +type OverloadManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The interval for refreshing resource usage. + RefreshInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=refresh_interval,json=refreshInterval,proto3" json:"refresh_interval,omitempty"` + // The set of resources to monitor. + ResourceMonitors []*ResourceMonitor `protobuf:"bytes,2,rep,name=resource_monitors,json=resourceMonitors,proto3" json:"resource_monitors,omitempty"` + // The set of overload actions. + Actions []*OverloadAction `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` + // The set of load shed points. + LoadshedPoints []*LoadShedPoint `protobuf:"bytes,5,rep,name=loadshed_points,json=loadshedPoints,proto3" json:"loadshed_points,omitempty"` + // Configuration for buffer factory. 
+ BufferFactoryConfig *BufferFactoryConfig `protobuf:"bytes,4,opt,name=buffer_factory_config,json=bufferFactoryConfig,proto3" json:"buffer_factory_config,omitempty"` +} + +func (x *OverloadManager) Reset() { + *x = OverloadManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OverloadManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OverloadManager) ProtoMessage() {} + +func (x *OverloadManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OverloadManager.ProtoReflect.Descriptor instead. +func (*OverloadManager) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{8} +} + +func (x *OverloadManager) GetRefreshInterval() *durationpb.Duration { + if x != nil { + return x.RefreshInterval + } + return nil +} + +func (x *OverloadManager) GetResourceMonitors() []*ResourceMonitor { + if x != nil { + return x.ResourceMonitors + } + return nil +} + +func (x *OverloadManager) GetActions() []*OverloadAction { + if x != nil { + return x.Actions + } + return nil +} + +func (x *OverloadManager) GetLoadshedPoints() []*LoadShedPoint { + if x != nil { + return x.LoadshedPoints + } + return nil +} + +func (x *OverloadManager) GetBufferFactoryConfig() *BufferFactoryConfig { + if x != nil { + return x.BufferFactoryConfig + } + return nil +} + +type ScaleTimersOverloadActionConfig_ScaleTimer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of timer this minimum applies to. + Timer ScaleTimersOverloadActionConfig_TimerType `protobuf:"varint,1,opt,name=timer,proto3,enum=envoy.config.overload.v3.ScaleTimersOverloadActionConfig_TimerType" json:"timer,omitempty"` + // Types that are assignable to OverloadAdjust: + // + // *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout + // *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale + OverloadAdjust isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust `protobuf_oneof:"overload_adjust"` +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) Reset() { + *x = ScaleTimersOverloadActionConfig_ScaleTimer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer) ProtoMessage() {} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig_ScaleTimer.ProtoReflect.Descriptor instead. 
+func (*ScaleTimersOverloadActionConfig_ScaleTimer) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetTimer() ScaleTimersOverloadActionConfig_TimerType { + if x != nil { + return x.Timer + } + return ScaleTimersOverloadActionConfig_UNSPECIFIED +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) GetOverloadAdjust() isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust { + if m != nil { + return m.OverloadAdjust + } + return nil +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinTimeout() *durationpb.Duration { + if x, ok := x.GetOverloadAdjust().(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok { + return x.MinTimeout + } + return nil +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinScale() *v3.Percent { + if x, ok := x.GetOverloadAdjust().(*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale); ok { + return x.MinScale + } + return nil +} + +type isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust interface { + isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() +} + +type ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout struct { + // Sets the minimum duration as an absolute value. + MinTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=min_timeout,json=minTimeout,proto3,oneof"` +} + +type ScaleTimersOverloadActionConfig_ScaleTimer_MinScale struct { + // Sets the minimum duration as a percentage of the maximum value. + MinScale *v3.Percent `protobuf:"bytes,3,opt,name=min_scale,json=minScale,proto3,oneof"` +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() { +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() { +} + +var File_envoy_config_overload_v3_overload_proto protoreflect.FileDescriptor + +var file_envoy_config_overload_v3_overload_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x78, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x22, + 0xa1, 0x01, 0x0a, 0x0d, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x44, 0x0a, 0x11, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, + 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x10, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x4a, 0x0a, 0x14, 0x73, 0x61, 0x74, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x13, + 0x73, 0x61, 0x74, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x22, 0xf9, 0x01, 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x09, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 
0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x41, 0x0a, 0x06, 0x73, 0x63, 0x61, 0x6c, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, + 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x74, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, + 0xa7, 0x04, 0x0a, 0x1f, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x7e, 0x0a, 0x13, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x63, 0x61, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, + 0x52, 0x11, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x46, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x1a, 0x80, 0x02, 0x0a, 0x0a, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x72, 0x12, 0x65, 0x0a, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, + 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, + 0x20, 0x00, 0x52, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x6d, 0x69, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x6d, 0x69, 0x6e, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x42, 0x16, + 0x0a, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x64, 0x6a, 0x75, 0x73, + 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x80, 0x01, 0x0a, 0x09, 0x54, 0x69, 
0x6d, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x44, 0x4f, + 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x48, 0x54, + 0x54, 0x50, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x53, 0x54, + 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x54, + 0x52, 0x41, 0x4e, 0x53, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x22, 0xe4, 0x01, 0x0a, 0x0e, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x74, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x75, 0x0a, 0x0d, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, + 0x0a, 0x08, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x22, 0x70, 0x0a, 0x13, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, + 0x0a, 0x25, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x77, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x2a, 0x04, 0x18, 0x38, 0x28, 0x0a, 0x52, 0x1f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x6f, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 
0x77, 0x65, 0x72, 0x4f, 0x66, 0x54, 0x77, 0x6f, 0x22, 0xe8, 0x03, 0x0a, 0x0f, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, + 0x10, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x60, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x0f, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x15, 0x62, + 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x46, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x34, + 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, 0x33, 0x3b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_overload_v3_overload_proto_rawDescOnce sync.Once + file_envoy_config_overload_v3_overload_proto_rawDescData = file_envoy_config_overload_v3_overload_proto_rawDesc +) + +func file_envoy_config_overload_v3_overload_proto_rawDescGZIP() []byte { + file_envoy_config_overload_v3_overload_proto_rawDescOnce.Do(func() { + file_envoy_config_overload_v3_overload_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_overload_v3_overload_proto_rawDescData) + }) + return file_envoy_config_overload_v3_overload_proto_rawDescData +} + +var file_envoy_config_overload_v3_overload_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_overload_v3_overload_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_envoy_config_overload_v3_overload_proto_goTypes = []interface{}{ + (ScaleTimersOverloadActionConfig_TimerType)(0), // 0: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType + (*ResourceMonitor)(nil), // 1: envoy.config.overload.v3.ResourceMonitor + (*ThresholdTrigger)(nil), // 2: envoy.config.overload.v3.ThresholdTrigger + (*ScaledTrigger)(nil), // 3: envoy.config.overload.v3.ScaledTrigger + (*Trigger)(nil), // 4: envoy.config.overload.v3.Trigger + (*ScaleTimersOverloadActionConfig)(nil), // 5: envoy.config.overload.v3.ScaleTimersOverloadActionConfig + (*OverloadAction)(nil), // 6: envoy.config.overload.v3.OverloadAction + (*LoadShedPoint)(nil), // 7: envoy.config.overload.v3.LoadShedPoint + (*BufferFactoryConfig)(nil), // 8: envoy.config.overload.v3.BufferFactoryConfig + (*OverloadManager)(nil), // 9: envoy.config.overload.v3.OverloadManager + (*ScaleTimersOverloadActionConfig_ScaleTimer)(nil), // 10: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + (*anypb.Any)(nil), // 11: google.protobuf.Any + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration + (*v3.Percent)(nil), // 13: envoy.type.v3.Percent +} +var file_envoy_config_overload_v3_overload_proto_depIdxs = []int32{ + 11, // 0: envoy.config.overload.v3.ResourceMonitor.typed_config:type_name -> google.protobuf.Any + 2, // 1: envoy.config.overload.v3.Trigger.threshold:type_name -> envoy.config.overload.v3.ThresholdTrigger + 3, // 2: envoy.config.overload.v3.Trigger.scaled:type_name -> envoy.config.overload.v3.ScaledTrigger + 10, // 3: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.timer_scale_factors:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + 4, // 4: envoy.config.overload.v3.OverloadAction.triggers:type_name -> envoy.config.overload.v3.Trigger + 11, // 5: envoy.config.overload.v3.OverloadAction.typed_config:type_name -> google.protobuf.Any + 4, // 6: envoy.config.overload.v3.LoadShedPoint.triggers:type_name -> envoy.config.overload.v3.Trigger + 12, // 7: envoy.config.overload.v3.OverloadManager.refresh_interval:type_name -> google.protobuf.Duration + 1, // 8: envoy.config.overload.v3.OverloadManager.resource_monitors:type_name -> envoy.config.overload.v3.ResourceMonitor + 6, // 9: envoy.config.overload.v3.OverloadManager.actions:type_name -> 
envoy.config.overload.v3.OverloadAction + 7, // 10: envoy.config.overload.v3.OverloadManager.loadshed_points:type_name -> envoy.config.overload.v3.LoadShedPoint + 8, // 11: envoy.config.overload.v3.OverloadManager.buffer_factory_config:type_name -> envoy.config.overload.v3.BufferFactoryConfig + 0, // 12: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.timer:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType + 12, // 13: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_timeout:type_name -> google.protobuf.Duration + 13, // 14: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_scale:type_name -> envoy.type.v3.Percent + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_envoy_config_overload_v3_overload_proto_init() } +func file_envoy_config_overload_v3_overload_proto_init() { + if File_envoy_config_overload_v3_overload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_overload_v3_overload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceMonitor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ThresholdTrigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaledTrigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaleTimersOverloadActionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OverloadAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadShedPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BufferFactoryConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[8].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*OverloadManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaleTimersOverloadActionConfig_ScaleTimer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ResourceMonitor_TypedConfig)(nil), + } + file_envoy_config_overload_v3_overload_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Trigger_Threshold)(nil), + (*Trigger_Scaled)(nil), + } + file_envoy_config_overload_v3_overload_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout)(nil), + (*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_overload_v3_overload_proto_rawDesc, + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_overload_v3_overload_proto_goTypes, + DependencyIndexes: file_envoy_config_overload_v3_overload_proto_depIdxs, + EnumInfos: file_envoy_config_overload_v3_overload_proto_enumTypes, + MessageInfos: file_envoy_config_overload_v3_overload_proto_msgTypes, + }.Build() + File_envoy_config_overload_v3_overload_proto = out.File + file_envoy_config_overload_v3_overload_proto_rawDesc = nil + file_envoy_config_overload_v3_overload_proto_goTypes = nil + file_envoy_config_overload_v3_overload_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go new file mode 100644 index 000000000..267297ed2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go @@ -0,0 +1,1741 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceMonitor with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceMonitor) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceMonitor with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceMonitorMultiError, or nil if none found. 
+func (m *ResourceMonitor) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceMonitor) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ResourceMonitorValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *ResourceMonitor_TypedConfig: + if v == nil { + err := ResourceMonitorValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ResourceMonitorMultiError(errors) + } + + return nil +} + +// ResourceMonitorMultiError is an error wrapping multiple validation errors +// returned by ResourceMonitor.ValidateAll() if the designated constraints +// aren't met. +type ResourceMonitorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMonitorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMonitorMultiError) AllErrors() []error { return m } + +// ResourceMonitorValidationError is the validation error returned by +// ResourceMonitor.Validate if the designated constraints aren't met. +type ResourceMonitorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceMonitorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceMonitorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceMonitorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceMonitorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceMonitorValidationError) ErrorName() string { return "ResourceMonitorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceMonitorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceMonitor.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceMonitorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceMonitorValidationError{} + +// Validate checks the field values on ThresholdTrigger with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ThresholdTrigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ThresholdTrigger with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ThresholdTriggerMultiError, or nil if none found. +func (m *ThresholdTrigger) ValidateAll() error { + return m.validate(true) +} + +func (m *ThresholdTrigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetValue(); val < 0 || val > 1 { + err := ThresholdTriggerValidationError{ + field: "Value", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ThresholdTriggerMultiError(errors) + } + + return nil +} + +// ThresholdTriggerMultiError is an error wrapping multiple validation errors +// returned by ThresholdTrigger.ValidateAll() if the designated constraints +// aren't met. +type ThresholdTriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ThresholdTriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ThresholdTriggerMultiError) AllErrors() []error { return m } + +// ThresholdTriggerValidationError is the validation error returned by +// ThresholdTrigger.Validate if the designated constraints aren't met. +type ThresholdTriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ThresholdTriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ThresholdTriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ThresholdTriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ThresholdTriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ThresholdTriggerValidationError) ErrorName() string { return "ThresholdTriggerValidationError" } + +// Error satisfies the builtin error interface +func (e ThresholdTriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sThresholdTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ThresholdTriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ThresholdTriggerValidationError{} + +// Validate checks the field values on ScaledTrigger with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScaledTrigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScaledTrigger with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScaledTriggerMultiError, or +// nil if none found. +func (m *ScaledTrigger) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaledTrigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetScalingThreshold(); val < 0 || val > 1 { + err := ScaledTriggerValidationError{ + field: "ScalingThreshold", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if val := m.GetSaturationThreshold(); val < 0 || val > 1 { + err := ScaledTriggerValidationError{ + field: "SaturationThreshold", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScaledTriggerMultiError(errors) + } + + return nil +} + +// ScaledTriggerMultiError is an error wrapping multiple validation errors +// returned by ScaledTrigger.ValidateAll() if the designated constraints +// aren't met. +type ScaledTriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaledTriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaledTriggerMultiError) AllErrors() []error { return m } + +// ScaledTriggerValidationError is the validation error returned by +// ScaledTrigger.Validate if the designated constraints aren't met. +type ScaledTriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaledTriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaledTriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaledTriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaledTriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScaledTriggerValidationError) ErrorName() string { return "ScaledTriggerValidationError" } + +// Error satisfies the builtin error interface +func (e ScaledTriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaledTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaledTriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaledTriggerValidationError{} + +// Validate checks the field values on Trigger with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Trigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Trigger with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TriggerMultiError, or nil if none found. +func (m *Trigger) ValidateAll() error { + return m.validate(true) +} + +func (m *Trigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TriggerValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofTriggerOneofPresent := false + switch v := m.TriggerOneof.(type) { + case *Trigger_Threshold: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true + + if all { + switch v := interface{}(m.GetThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Trigger_Scaled: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true + + if all { + switch v := interface{}(m.GetScaled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScaled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTriggerOneofPresent { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return TriggerMultiError(errors) + } + + return nil +} + +// TriggerMultiError is an error wrapping multiple validation errors returned +// by Trigger.ValidateAll() if the designated constraints aren't met. +type TriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TriggerMultiError) AllErrors() []error { return m } + +// TriggerValidationError is the validation error returned by Trigger.Validate +// if the designated constraints aren't met. +type TriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TriggerValidationError) ErrorName() string { return "TriggerValidationError" } + +// Error satisfies the builtin error interface +func (e TriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TriggerValidationError{} + +// Validate checks the field values on ScaleTimersOverloadActionConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScaleTimersOverloadActionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScaleTimersOverloadActionConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ScaleTimersOverloadActionConfigMultiError, or nil if none found. 
+func (m *ScaleTimersOverloadActionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaleTimersOverloadActionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetTimerScaleFactors()) < 1 { + err := ScaleTimersOverloadActionConfigValidationError{ + field: "TimerScaleFactors", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTimerScaleFactors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScaleTimersOverloadActionConfigMultiError(errors) + } + + return nil +} + +// ScaleTimersOverloadActionConfigMultiError is an error wrapping multiple +// validation errors returned by ScaleTimersOverloadActionConfig.ValidateAll() +// if the designated constraints aren't met. +type ScaleTimersOverloadActionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaleTimersOverloadActionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaleTimersOverloadActionConfigMultiError) AllErrors() []error { return m } + +// ScaleTimersOverloadActionConfigValidationError is the validation error +// returned by ScaleTimersOverloadActionConfig.Validate if the designated +// constraints aren't met. +type ScaleTimersOverloadActionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaleTimersOverloadActionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaleTimersOverloadActionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaleTimersOverloadActionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaleTimersOverloadActionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScaleTimersOverloadActionConfigValidationError) ErrorName() string { + return "ScaleTimersOverloadActionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ScaleTimersOverloadActionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaleTimersOverloadActionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaleTimersOverloadActionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaleTimersOverloadActionConfigValidationError{} + +// Validate checks the field values on OverloadAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OverloadAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OverloadAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OverloadActionMultiError, +// or nil if none found. +func (m *OverloadAction) ValidateAll() error { + return m.validate(true) +} + +func (m *OverloadAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := OverloadActionValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetTriggers()) < 1 { + err := OverloadActionValidationError{ + field: "Triggers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTriggers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OverloadActionMultiError(errors) + } + + return nil +} + +// OverloadActionMultiError is an error wrapping multiple validation errors +// returned by OverloadAction.ValidateAll() if the designated constraints +// aren't met. +type OverloadActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OverloadActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OverloadActionMultiError) AllErrors() []error { return m } + +// OverloadActionValidationError is the validation error returned by +// OverloadAction.Validate if the designated constraints aren't met. +type OverloadActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OverloadActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OverloadActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OverloadActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OverloadActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OverloadActionValidationError) ErrorName() string { return "OverloadActionValidationError" } + +// Error satisfies the builtin error interface +func (e OverloadActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOverloadAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OverloadActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OverloadActionValidationError{} + +// Validate checks the field values on LoadShedPoint with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LoadShedPoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadShedPoint with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LoadShedPointMultiError, or +// nil if none found. 
+func (m *LoadShedPoint) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadShedPoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := LoadShedPointValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetTriggers()) < 1 { + err := LoadShedPointValidationError{ + field: "Triggers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTriggers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LoadShedPointMultiError(errors) + } + + return nil +} + +// LoadShedPointMultiError is an error wrapping multiple validation errors +// returned by LoadShedPoint.ValidateAll() if the designated constraints +// aren't met. +type LoadShedPointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadShedPointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadShedPointMultiError) AllErrors() []error { return m } + +// LoadShedPointValidationError is the validation error returned by +// LoadShedPoint.Validate if the designated constraints aren't met. +type LoadShedPointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadShedPointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadShedPointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadShedPointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadShedPointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LoadShedPointValidationError) ErrorName() string { return "LoadShedPointValidationError" } + +// Error satisfies the builtin error interface +func (e LoadShedPointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadShedPoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadShedPointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadShedPointValidationError{} + +// Validate checks the field values on BufferFactoryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *BufferFactoryConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BufferFactoryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BufferFactoryConfigMultiError, or nil if none found. +func (m *BufferFactoryConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *BufferFactoryConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetMinimumAccountToTrackPowerOfTwo(); val < 10 || val > 56 { + err := BufferFactoryConfigValidationError{ + field: "MinimumAccountToTrackPowerOfTwo", + reason: "value must be inside range [10, 56]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return BufferFactoryConfigMultiError(errors) + } + + return nil +} + +// BufferFactoryConfigMultiError is an error wrapping multiple validation +// errors returned by BufferFactoryConfig.ValidateAll() if the designated +// constraints aren't met. +type BufferFactoryConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BufferFactoryConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BufferFactoryConfigMultiError) AllErrors() []error { return m } + +// BufferFactoryConfigValidationError is the validation error returned by +// BufferFactoryConfig.Validate if the designated constraints aren't met. +type BufferFactoryConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BufferFactoryConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BufferFactoryConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BufferFactoryConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BufferFactoryConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BufferFactoryConfigValidationError) ErrorName() string { + return "BufferFactoryConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e BufferFactoryConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBufferFactoryConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BufferFactoryConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BufferFactoryConfigValidationError{} + +// Validate checks the field values on OverloadManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OverloadManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OverloadManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OverloadManagerMultiError, or nil if none found. +func (m *OverloadManager) ValidateAll() error { + return m.validate(true) +} + +func (m *OverloadManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRefreshInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRefreshInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetResourceMonitors()) < 1 { + err := OverloadManagerValidationError{ + field: "ResourceMonitors", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResourceMonitors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := 
interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetLoadshedPoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetBufferFactoryConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBufferFactoryConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OverloadManagerMultiError(errors) + } + + return nil +} + +// OverloadManagerMultiError is an error wrapping multiple validation errors +// returned by OverloadManager.ValidateAll() if the designated constraints +// aren't met. +type OverloadManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OverloadManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OverloadManagerMultiError) AllErrors() []error { return m } + +// OverloadManagerValidationError is the validation error returned by +// OverloadManager.Validate if the designated constraints aren't met. 
+type OverloadManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OverloadManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OverloadManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OverloadManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OverloadManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OverloadManagerValidationError) ErrorName() string { return "OverloadManagerValidationError" } + +// Error satisfies the builtin error interface +func (e OverloadManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOverloadManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OverloadManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OverloadManagerValidationError{} + +// Validate checks the field values on +// ScaleTimersOverloadActionConfig_ScaleTimer with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScaleTimersOverloadActionConfig_ScaleTimer with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ScaleTimersOverloadActionConfig_ScaleTimerMultiError, or nil if none found. 
+func (m *ScaleTimersOverloadActionConfig_ScaleTimer) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _ScaleTimersOverloadActionConfig_ScaleTimer_Timer_NotInLookup[m.GetTimer()]; ok { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "Timer", + reason: "value must not be in list [UNSPECIFIED]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := ScaleTimersOverloadActionConfig_TimerType_name[int32(m.GetTimer())]; !ok { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "Timer", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofOverloadAdjustPresent := false + switch v := m.OverloadAdjust.(type) { + case *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true + + if all { + switch v := interface{}(m.GetMinTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true + + if all { + switch v := interface{}(m.GetMinScale()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinScale()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOverloadAdjustPresent { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "value is required", + } + if 
!all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScaleTimersOverloadActionConfig_ScaleTimerMultiError(errors) + } + + return nil +} + +// ScaleTimersOverloadActionConfig_ScaleTimerMultiError is an error wrapping +// multiple validation errors returned by +// ScaleTimersOverloadActionConfig_ScaleTimer.ValidateAll() if the designated +// constraints aren't met. +type ScaleTimersOverloadActionConfig_ScaleTimerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaleTimersOverloadActionConfig_ScaleTimerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaleTimersOverloadActionConfig_ScaleTimerMultiError) AllErrors() []error { return m } + +// ScaleTimersOverloadActionConfig_ScaleTimerValidationError is the validation +// error returned by ScaleTimersOverloadActionConfig_ScaleTimer.Validate if +// the designated constraints aren't met. +type ScaleTimersOverloadActionConfig_ScaleTimerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) ErrorName() string { + return "ScaleTimersOverloadActionConfig_ScaleTimerValidationError" +} + +// Error satisfies the builtin error interface +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaleTimersOverloadActionConfig_ScaleTimer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaleTimersOverloadActionConfig_ScaleTimerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaleTimersOverloadActionConfig_ScaleTimerValidationError{} + +var _ScaleTimersOverloadActionConfig_ScaleTimer_Timer_NotInLookup = map[ScaleTimersOverloadActionConfig_TimerType]struct{}{ + 0: {}, +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go new file mode 100644 index 000000000..3a8ba0054 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go @@ -0,0 +1,938 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
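For orientation, here is a minimal usage sketch of the protoc-gen-validate helpers generated above, assuming the package is imported under its declared name overloadv3; the non-zero timer value is illustrative, since any defined TimerType other than UNSPECIFIED satisfies the not-in rule.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"

	overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
)

func main() {
	// An empty ScaleTimer violates both rules enforced above: Timer is
	// UNSPECIFIED (0) and the overload_adjust oneof is unset.
	bad := &overloadv3.ScaleTimersOverloadActionConfig_ScaleTimer{}
	if err := bad.ValidateAll(); err != nil {
		// ValidateAll returns the MultiError wrapper, so every violation can be listed.
		if multi, ok := err.(overloadv3.ScaleTimersOverloadActionConfig_ScaleTimerMultiError); ok {
			for _, e := range multi.AllErrors() {
				fmt.Println(e)
			}
		}
	}

	// A timer with a defined, non-UNSPECIFIED enum value and a min_timeout
	// should satisfy the same rules; Validate stops at the first violation.
	good := &overloadv3.ScaleTimersOverloadActionConfig_ScaleTimer{
		Timer: overloadv3.ScaleTimersOverloadActionConfig_TimerType(1), // illustrative: any defined value other than UNSPECIFIED
		OverloadAdjust: &overloadv3.ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout{
			MinTimeout: durationpb.New(0),
		},
	}
	fmt.Println(good.Validate())
}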
+// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceMonitor) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMonitor) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceMonitor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ResourceMonitor_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceMonitor_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceMonitor_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ThresholdTrigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ThresholdTrigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ThresholdTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ScaledTrigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ScaledTrigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaledTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SaturationThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SaturationThreshold)))) + i-- + dAtA[i] = 0x11 + } + if m.ScalingThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ScalingThreshold)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Trigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Trigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TriggerOneof.(*Trigger_Scaled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TriggerOneof.(*Trigger_Threshold); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Trigger_Threshold) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Threshold) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Threshold != nil { + size, err := m.Threshold.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Trigger_Scaled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Scaled) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Scaled != nil { + size, err := m.Scaled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
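A short usage sketch of the strict marshaller generated above, assuming the vtprotobuf build tag from the file header is enabled; the trigger name is a placeholder, and the output is ordinary protobuf wire format that the standard decoder can read back.

//go:build vtprotobuf

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
)

func main() {
	trigger := &overloadv3.Trigger{
		Name: "envoy.resource_monitors.fixed_heap", // illustrative resource name
		TriggerOneof: &overloadv3.Trigger_Threshold{
			Threshold: &overloadv3.ThresholdTrigger{Value: 0.95},
		},
	}

	// The generated strict marshaller pre-sizes its buffer with SizeVT and
	// writes fields back to front, as in the functions above.
	raw, err := trigger.MarshalVTStrict()
	if err != nil {
		panic(err)
	}

	// The bytes are regular protobuf wire format, so the standard decoder
	// should round-trip them.
	var decoded overloadv3.Trigger
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(len(raw), decoded.GetName())
}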
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Timer != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timer)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinTimeout != nil { + size, err := (*durationpb.Duration)(m.MinTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinScale != nil { + if vtmsg, ok := interface{}(m.MinScale).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinScale) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TimerScaleFactors) > 0 { + for iNdEx := len(m.TimerScaleFactors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TimerScaleFactors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OverloadAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadShedPoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadShedPoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadShedPoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BufferFactoryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferFactoryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferFactoryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinimumAccountToTrackPowerOfTwo != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinimumAccountToTrackPowerOfTwo)) + i-- + dAtA[i] = 
0x8 + } + return len(dAtA) - i, nil +} + +func (m *OverloadManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoadshedPoints) > 0 { + for iNdEx := len(m.LoadshedPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadshedPoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.BufferFactoryConfig != nil { + size, err := m.BufferFactoryConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceMonitors) > 0 { + for iNdEx := len(m.ResourceMonitors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceMonitors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.RefreshInterval != nil { + size, err := (*durationpb.Duration)(m.RefreshInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceMonitor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceMonitor_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ThresholdTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ScaledTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScalingThreshold != 0 { + n += 9 + } + if m.SaturationThreshold != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TriggerOneof.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger_Threshold) SizeVT() (n int) { + if m 
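A further sketch composing an OverloadManager from the messages marshalled above and checking that SizeVT agrees with the marshalled length; the monitor and action names are illustrative placeholders, and a real configuration would also carry typed_config payloads.

//go:build vtprotobuf

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"

	overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
)

func main() {
	om := &overloadv3.OverloadManager{
		RefreshInterval: durationpb.New(250 * time.Millisecond),
		ResourceMonitors: []*overloadv3.ResourceMonitor{
			// Illustrative monitor name; a real config would also set typed_config.
			{Name: "envoy.resource_monitors.fixed_heap"},
		},
		Actions: []*overloadv3.OverloadAction{{
			Name: "envoy.overload_actions.stop_accepting_requests", // illustrative action name
			Triggers: []*overloadv3.Trigger{{
				Name: "envoy.resource_monitors.fixed_heap",
				TriggerOneof: &overloadv3.Trigger_Threshold{
					Threshold: &overloadv3.ThresholdTrigger{Value: 0.95},
				},
			}},
		}},
	}

	// MarshalVTStrict allocates exactly SizeVT bytes, so the two should agree.
	buf, err := om.MarshalVTStrict()
	if err != nil {
		panic(err)
	}
	fmt.Println(om.SizeVT() == len(buf))
}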
== nil { + return 0 + } + var l int + _ = l + if m.Threshold != nil { + l = m.Threshold.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Trigger_Scaled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Scaled != nil { + l = m.Scaled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timer != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timer)) + } + if vtmsg, ok := m.OverloadAdjust.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeout != nil { + l = (*durationpb.Duration)(m.MinTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinScale != nil { + if size, ok := interface{}(m.MinScale).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinScale) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TimerScaleFactors) > 0 { + for _, e := range m.TimerScaleFactors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OverloadAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadShedPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BufferFactoryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumAccountToTrackPowerOfTwo != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinimumAccountToTrackPowerOfTwo)) + } + n += len(m.unknownFields) + return n +} + +func (m *OverloadManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RefreshInterval != nil { + l = (*durationpb.Duration)(m.RefreshInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceMonitors) > 0 { + for _, e := range m.ResourceMonitors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BufferFactoryConfig != nil { + l = m.BufferFactoryConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + 
} + if len(m.LoadshedPoints) > 0 { + for _, e := range m.LoadshedPoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go new file mode 100644 index 000000000..c069fae84 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go @@ -0,0 +1,1788 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Should we do safe-list or block-list style access control? +type RBAC_Action int32 + +const ( + // The policies grant access to principals. The rest are denied. This is safe-list style + // access control. This is the default type. + RBAC_ALLOW RBAC_Action = 0 + // The policies deny access to principals. The rest are allowed. This is block-list style + // access control. + RBAC_DENY RBAC_Action = 1 + // The policies set the “access_log_hint“ dynamic metadata key based on if requests match. + // All requests are allowed. + RBAC_LOG RBAC_Action = 2 +) + +// Enum value maps for RBAC_Action. +var ( + RBAC_Action_name = map[int32]string{ + 0: "ALLOW", + 1: "DENY", + 2: "LOG", + } + RBAC_Action_value = map[string]int32{ + "ALLOW": 0, + "DENY": 1, + "LOG": 2, + } +) + +func (x RBAC_Action) Enum() *RBAC_Action { + p := new(RBAC_Action) + *p = x + return p +} + +func (x RBAC_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RBAC_Action) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_rbac_v3_rbac_proto_enumTypes[0].Descriptor() +} + +func (RBAC_Action) Type() protoreflect.EnumType { + return &file_envoy_config_rbac_v3_rbac_proto_enumTypes[0] +} + +func (x RBAC_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RBAC_Action.Descriptor instead. +func (RBAC_Action) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} +} + +// Deny and allow here refer to RBAC decisions, not actions. +type RBAC_AuditLoggingOptions_AuditCondition int32 + +const ( + // Never audit. + RBAC_AuditLoggingOptions_NONE RBAC_AuditLoggingOptions_AuditCondition = 0 + // Audit when RBAC denies the request. + RBAC_AuditLoggingOptions_ON_DENY RBAC_AuditLoggingOptions_AuditCondition = 1 + // Audit when RBAC allows the request. 
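As an aside, the generated lookup maps and String method above allow translating between symbolic action names and wire values; a minimal sketch:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// Round-trip between the symbolic names and the wire values using the
	// generated lookup maps.
	v, ok := rbacv3.RBAC_Action_value["DENY"]
	fmt.Println(v, ok) // 1 true

	a := rbacv3.RBAC_Action(v)
	fmt.Println(a.String())                 // "DENY"
	fmt.Println(rbacv3.RBAC_Action_name[2]) // "LOG"
}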
+ RBAC_AuditLoggingOptions_ON_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 2 + // Audit whether RBAC allows or denies the request. + RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 3 +) + +// Enum value maps for RBAC_AuditLoggingOptions_AuditCondition. +var ( + RBAC_AuditLoggingOptions_AuditCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_DENY", + 2: "ON_ALLOW", + 3: "ON_DENY_AND_ALLOW", + } + RBAC_AuditLoggingOptions_AuditCondition_value = map[string]int32{ + "NONE": 0, + "ON_DENY": 1, + "ON_ALLOW": 2, + "ON_DENY_AND_ALLOW": 3, + } +) + +func (x RBAC_AuditLoggingOptions_AuditCondition) Enum() *RBAC_AuditLoggingOptions_AuditCondition { + p := new(RBAC_AuditLoggingOptions_AuditCondition) + *p = x + return p +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_rbac_v3_rbac_proto_enumTypes[1].Descriptor() +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Type() protoreflect.EnumType { + return &file_envoy_config_rbac_v3_rbac_proto_enumTypes[1] +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions_AuditCondition.Descriptor instead. +func (RBAC_AuditLoggingOptions_AuditCondition) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// Role Based Access Control (RBAC) provides service-level and method-level access control for a +// service. Requests are allowed or denied based on the “action“ and whether a matching policy is +// found. For instance, if the action is ALLOW and a matching policy is found the request should be +// allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// “access_log_hint“ value in the shared key namespace 'envoy.common' is set to “true“ indicating +// the request should be logged. +// +// Here is an example of RBAC configuration. It has two policies: +// +// - Service account “cluster.local/ns/default/sa/admin“ has full access to the service, and so +// does "cluster.local/ns/default/sa/superuser". +// +// - Any user can read (“GET“) the service at paths with prefix “/products“, so long as the +// destination port is either 80 or 443. +// +// .. code-block:: yaml +// +// action: ALLOW +// policies: +// "service-admin": +// permissions: +// +// - any: true +// principals: +// +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/admin" +// +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/superuser" +// "product-viewer": +// permissions: +// +// - and_rules: +// rules: +// +// - header: +// name: ":method" +// string_match: +// exact: "GET" +// +// - url_path: +// path: { prefix: "/products" } +// +// - or_rules: +// rules: +// +// - destination_port: 80 +// +// - destination_port: 443 +// principals: +// +// - any: true +type RBAC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. 
+ // + // Actions: + // + // - “ALLOW“: Allows the request if and only if there is a policy that matches + // the request. + // - “DENY“: Allows the request if and only if there are no policies that + // match the request. + // - “LOG“: Allows all requests. If at least one policy matches, the dynamic + // metadata key “access_log_hint“ is set to the value “true“ under the shared + // key namespace “envoy.common“. If no policies match, it is set to “false“. + // Other actions do not modify this key. + Action RBAC_Action `protobuf:"varint,1,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` + // Maps from policy name to policy. A match occurs when at least one policy matches the request. + // The policies are evaluated in lexicographic order of the policy name. + Policies map[string]*Policy `protobuf:"bytes,2,rep,name=policies,proto3" json:"policies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Audit logging options that include the condition for audit logging to happen + // and audit logger configurations. + // + // [#not-implemented-hide:] + AuditLoggingOptions *RBAC_AuditLoggingOptions `protobuf:"bytes,3,opt,name=audit_logging_options,json=auditLoggingOptions,proto3" json:"audit_logging_options,omitempty"` +} + +func (x *RBAC) Reset() { + *x = RBAC{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC) ProtoMessage() {} + +func (x *RBAC) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC.ProtoReflect.Descriptor instead. +func (*RBAC) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0} +} + +func (x *RBAC) GetAction() RBAC_Action { + if x != nil { + return x.Action + } + return RBAC_ALLOW +} + +func (x *RBAC) GetPolicies() map[string]*Policy { + if x != nil { + return x.Policies + } + return nil +} + +func (x *RBAC) GetAuditLoggingOptions() *RBAC_AuditLoggingOptions { + if x != nil { + return x.AuditLoggingOptions + } + return nil +} + +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. +type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the “any“ field set to true should be used. + Permissions []*Permission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the “any“ field set to + // true should be used. 
+ Principals []*Principal `protobuf:"bytes,2,rep,name=principals,proto3" json:"principals,omitempty"` + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + Condition *v1alpha1.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + CheckedCondition *v1alpha1.CheckedExpr `protobuf:"bytes,4,opt,name=checked_condition,json=checkedCondition,proto3" json:"checked_condition,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{1} +} + +func (x *Policy) GetPermissions() []*Permission { + if x != nil { + return x.Permissions + } + return nil +} + +func (x *Policy) GetPrincipals() []*Principal { + if x != nil { + return x.Principals + } + return nil +} + +func (x *Policy) GetCondition() *v1alpha1.Expr { + if x != nil { + return x.Condition + } + return nil +} + +func (x *Policy) GetCheckedCondition() *v1alpha1.CheckedExpr { + if x != nil { + return x.CheckedCondition + } + return nil +} + +// Permission defines an action (or actions) that a principal can take. +// [#next-free-field: 14] +type Permission struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *Permission_AndRules + // *Permission_OrRules + // *Permission_Any + // *Permission_Header + // *Permission_UrlPath + // *Permission_DestinationIp + // *Permission_DestinationPort + // *Permission_DestinationPortRange + // *Permission_Metadata + // *Permission_NotRule + // *Permission_RequestedServerName + // *Permission_Matcher + // *Permission_UriTemplate + Rule isPermission_Rule `protobuf_oneof:"rule"` +} + +func (x *Permission) Reset() { + *x = Permission{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Permission) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Permission) ProtoMessage() {} + +func (x *Permission) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Permission.ProtoReflect.Descriptor instead. 
+func (*Permission) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{2} +} + +func (m *Permission) GetRule() isPermission_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *Permission) GetAndRules() *Permission_Set { + if x, ok := x.GetRule().(*Permission_AndRules); ok { + return x.AndRules + } + return nil +} + +func (x *Permission) GetOrRules() *Permission_Set { + if x, ok := x.GetRule().(*Permission_OrRules); ok { + return x.OrRules + } + return nil +} + +func (x *Permission) GetAny() bool { + if x, ok := x.GetRule().(*Permission_Any); ok { + return x.Any + } + return false +} + +func (x *Permission) GetHeader() *v3.HeaderMatcher { + if x, ok := x.GetRule().(*Permission_Header); ok { + return x.Header + } + return nil +} + +func (x *Permission) GetUrlPath() *v31.PathMatcher { + if x, ok := x.GetRule().(*Permission_UrlPath); ok { + return x.UrlPath + } + return nil +} + +func (x *Permission) GetDestinationIp() *v32.CidrRange { + if x, ok := x.GetRule().(*Permission_DestinationIp); ok { + return x.DestinationIp + } + return nil +} + +func (x *Permission) GetDestinationPort() uint32 { + if x, ok := x.GetRule().(*Permission_DestinationPort); ok { + return x.DestinationPort + } + return 0 +} + +func (x *Permission) GetDestinationPortRange() *v33.Int32Range { + if x, ok := x.GetRule().(*Permission_DestinationPortRange); ok { + return x.DestinationPortRange + } + return nil +} + +func (x *Permission) GetMetadata() *v31.MetadataMatcher { + if x, ok := x.GetRule().(*Permission_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *Permission) GetNotRule() *Permission { + if x, ok := x.GetRule().(*Permission_NotRule); ok { + return x.NotRule + } + return nil +} + +func (x *Permission) GetRequestedServerName() *v31.StringMatcher { + if x, ok := x.GetRule().(*Permission_RequestedServerName); ok { + return x.RequestedServerName + } + return nil +} + +func (x *Permission) GetMatcher() *v32.TypedExtensionConfig { + if x, ok := x.GetRule().(*Permission_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Permission) GetUriTemplate() *v32.TypedExtensionConfig { + if x, ok := x.GetRule().(*Permission_UriTemplate); ok { + return x.UriTemplate + } + return nil +} + +type isPermission_Rule interface { + isPermission_Rule() +} + +type Permission_AndRules struct { + // A set of rules that all must match in order to define the action. + AndRules *Permission_Set `protobuf:"bytes,1,opt,name=and_rules,json=andRules,proto3,oneof"` +} + +type Permission_OrRules struct { + // A set of rules where at least one must match in order to define the action. + OrRules *Permission_Set `protobuf:"bytes,2,opt,name=or_rules,json=orRules,proto3,oneof"` +} + +type Permission_Any struct { + // When any is set, it matches any action. + Any bool `protobuf:"varint,3,opt,name=any,proto3,oneof"` +} + +type Permission_Header struct { + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the “url_path“ + // field if you want to match the URL path without the query and fragment string. + Header *v3.HeaderMatcher `protobuf:"bytes,4,opt,name=header,proto3,oneof"` +} + +type Permission_UrlPath struct { + // A URL path on the incoming HTTP request. Only available for HTTP. 
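A usage sketch of the oneof wrappers and getters above, expressing the "destination port is 80 or 443" rule from the product-viewer example and reading it back with a type switch:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// The "destination port is 80 or 443" rule, built from the generated
	// oneof wrapper types.
	perm := &rbacv3.Permission{
		Rule: &rbacv3.Permission_OrRules{
			OrRules: &rbacv3.Permission_Set{
				Rules: []*rbacv3.Permission{
					{Rule: &rbacv3.Permission_DestinationPort{DestinationPort: 80}},
					{Rule: &rbacv3.Permission_DestinationPort{DestinationPort: 443}},
				},
			},
		},
	}

	// Consumers typically branch on whichever oneof member is populated.
	switch r := perm.GetRule().(type) {
	case *rbacv3.Permission_OrRules:
		for _, p := range r.OrRules.GetRules() {
			fmt.Println(p.GetDestinationPort())
		}
	case *rbacv3.Permission_Any:
		fmt.Println("matches any action:", r.Any)
	default:
		fmt.Printf("unhandled rule %T\n", r)
	}
}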
+ UrlPath *v31.PathMatcher `protobuf:"bytes,10,opt,name=url_path,json=urlPath,proto3,oneof"` +} + +type Permission_DestinationIp struct { + // A CIDR block that describes the destination IP. + DestinationIp *v32.CidrRange `protobuf:"bytes,5,opt,name=destination_ip,json=destinationIp,proto3,oneof"` +} + +type Permission_DestinationPort struct { + // A port number that describes the destination port connecting to. + DestinationPort uint32 `protobuf:"varint,6,opt,name=destination_port,json=destinationPort,proto3,oneof"` +} + +type Permission_DestinationPortRange struct { + // A port number range that describes a range of destination ports connecting to. + DestinationPortRange *v33.Int32Range `protobuf:"bytes,11,opt,name=destination_port_range,json=destinationPortRange,proto3,oneof"` +} + +type Permission_Metadata struct { + // Metadata that describes additional information about the action. + Metadata *v31.MetadataMatcher `protobuf:"bytes,7,opt,name=metadata,proto3,oneof"` +} + +type Permission_NotRule struct { + // Negates matching the provided permission. For instance, if the value of + // “not_rule“ would match, this permission would not match. Conversely, if + // the value of “not_rule“ would not match, this permission would match. + NotRule *Permission `protobuf:"bytes,8,opt,name=not_rule,json=notRule,proto3,oneof"` +} + +type Permission_RequestedServerName struct { + // The request server from the client's connection request. This is + // typically TLS SNI. + // + // .. attention:: + // + // The behavior of this field may be affected by how Envoy is configured + // as explained below. + // + // * If the :ref:`TLS Inspector ` + // filter is not added, and if a ``FilterChainMatch`` is not defined for + // the :ref:`server name + // `, + // a TLS connection's requested SNI server name will be treated as if it + // wasn't present. + // + // * A :ref:`listener filter ` may + // overwrite a connection's requested server name within Envoy. + // + // Please refer to :ref:`this FAQ entry ` to learn to + // setup SNI. + RequestedServerName *v31.StringMatcher `protobuf:"bytes,9,opt,name=requested_server_name,json=requestedServerName,proto3,oneof"` +} + +type Permission_Matcher struct { + // Extension for configuring custom matchers for RBAC. + // [#extension-category: envoy.rbac.matchers] + Matcher *v32.TypedExtensionConfig `protobuf:"bytes,12,opt,name=matcher,proto3,oneof"` +} + +type Permission_UriTemplate struct { + // URI template path matching. + // [#extension-category: envoy.path.match] + UriTemplate *v32.TypedExtensionConfig `protobuf:"bytes,13,opt,name=uri_template,json=uriTemplate,proto3,oneof"` +} + +func (*Permission_AndRules) isPermission_Rule() {} + +func (*Permission_OrRules) isPermission_Rule() {} + +func (*Permission_Any) isPermission_Rule() {} + +func (*Permission_Header) isPermission_Rule() {} + +func (*Permission_UrlPath) isPermission_Rule() {} + +func (*Permission_DestinationIp) isPermission_Rule() {} + +func (*Permission_DestinationPort) isPermission_Rule() {} + +func (*Permission_DestinationPortRange) isPermission_Rule() {} + +func (*Permission_Metadata) isPermission_Rule() {} + +func (*Permission_NotRule) isPermission_Rule() {} + +func (*Permission_RequestedServerName) isPermission_Rule() {} + +func (*Permission_Matcher) isPermission_Rule() {} + +func (*Permission_UriTemplate) isPermission_Rule() {} + +// Principal defines an identity or a group of identities for a downstream +// subject. 
+// [#next-free-field: 13] +type Principal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Identifier: + // + // *Principal_AndIds + // *Principal_OrIds + // *Principal_Any + // *Principal_Authenticated_ + // *Principal_SourceIp + // *Principal_DirectRemoteIp + // *Principal_RemoteIp + // *Principal_Header + // *Principal_UrlPath + // *Principal_Metadata + // *Principal_FilterState + // *Principal_NotId + Identifier isPrincipal_Identifier `protobuf_oneof:"identifier"` +} + +func (x *Principal) Reset() { + *x = Principal{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal) ProtoMessage() {} + +func (x *Principal) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal.ProtoReflect.Descriptor instead. +func (*Principal) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3} +} + +func (m *Principal) GetIdentifier() isPrincipal_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (x *Principal) GetAndIds() *Principal_Set { + if x, ok := x.GetIdentifier().(*Principal_AndIds); ok { + return x.AndIds + } + return nil +} + +func (x *Principal) GetOrIds() *Principal_Set { + if x, ok := x.GetIdentifier().(*Principal_OrIds); ok { + return x.OrIds + } + return nil +} + +func (x *Principal) GetAny() bool { + if x, ok := x.GetIdentifier().(*Principal_Any); ok { + return x.Any + } + return false +} + +func (x *Principal) GetAuthenticated() *Principal_Authenticated { + if x, ok := x.GetIdentifier().(*Principal_Authenticated_); ok { + return x.Authenticated + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/rbac/v3/rbac.proto. 
+func (x *Principal) GetSourceIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_SourceIp); ok { + return x.SourceIp + } + return nil +} + +func (x *Principal) GetDirectRemoteIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_DirectRemoteIp); ok { + return x.DirectRemoteIp + } + return nil +} + +func (x *Principal) GetRemoteIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_RemoteIp); ok { + return x.RemoteIp + } + return nil +} + +func (x *Principal) GetHeader() *v3.HeaderMatcher { + if x, ok := x.GetIdentifier().(*Principal_Header); ok { + return x.Header + } + return nil +} + +func (x *Principal) GetUrlPath() *v31.PathMatcher { + if x, ok := x.GetIdentifier().(*Principal_UrlPath); ok { + return x.UrlPath + } + return nil +} + +func (x *Principal) GetMetadata() *v31.MetadataMatcher { + if x, ok := x.GetIdentifier().(*Principal_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *Principal) GetFilterState() *v31.FilterStateMatcher { + if x, ok := x.GetIdentifier().(*Principal_FilterState); ok { + return x.FilterState + } + return nil +} + +func (x *Principal) GetNotId() *Principal { + if x, ok := x.GetIdentifier().(*Principal_NotId); ok { + return x.NotId + } + return nil +} + +type isPrincipal_Identifier interface { + isPrincipal_Identifier() +} + +type Principal_AndIds struct { + // A set of identifiers that all must match in order to define the + // downstream. + AndIds *Principal_Set `protobuf:"bytes,1,opt,name=and_ids,json=andIds,proto3,oneof"` +} + +type Principal_OrIds struct { + // A set of identifiers at least one must match in order to define the + // downstream. + OrIds *Principal_Set `protobuf:"bytes,2,opt,name=or_ids,json=orIds,proto3,oneof"` +} + +type Principal_Any struct { + // When any is set, it matches any downstream. + Any bool `protobuf:"varint,3,opt,name=any,proto3,oneof"` +} + +type Principal_Authenticated_ struct { + // Authenticated attributes that identify the downstream. + Authenticated *Principal_Authenticated `protobuf:"bytes,4,opt,name=authenticated,proto3,oneof"` +} + +type Principal_SourceIp struct { + // A CIDR block that describes the downstream IP. + // This address will honor proxy protocol, but will not honor XFF. + // + // This field is deprecated; either use :ref:`remote_ip + // ` for the same + // behavior, or use + // :ref:`direct_remote_ip `. + // + // Deprecated: Marked as deprecated in envoy/config/rbac/v3/rbac.proto. + SourceIp *v32.CidrRange `protobuf:"bytes,5,opt,name=source_ip,json=sourceIp,proto3,oneof"` +} + +type Principal_DirectRemoteIp struct { + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. + DirectRemoteIp *v32.CidrRange `protobuf:"bytes,10,opt,name=direct_remote_ip,json=directRemoteIp,proto3,oneof"` +} + +type Principal_RemoteIp struct { + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. + RemoteIp *v32.CidrRange `protobuf:"bytes,11,opt,name=remote_ip,json=remoteIp,proto3,oneof"` +} + +type Principal_Header struct { + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. 
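A brief sketch contrasting the remote_ip and direct_remote_ip principals described above; it assumes the usual CidrRange shape (an address_prefix string plus a wrapped prefix_len) from the imported core/v3 package:

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	block := &corev3.CidrRange{
		AddressPrefix: "10.0.0.0",
		PrefixLen:     wrapperspb.UInt32(8),
	}

	// Per the field comments above, remote_ip may be inferred (e.g. from
	// x-forwarded-for or proxy protocol), while direct_remote_ip is always the
	// physical peer; source_ip is deprecated in favour of these two.
	inferred := &rbacv3.Principal{
		Identifier: &rbacv3.Principal_RemoteIp{RemoteIp: block},
	}
	physical := &rbacv3.Principal{
		Identifier: &rbacv3.Principal_DirectRemoteIp{DirectRemoteIp: block},
	}

	fmt.Println(inferred.GetRemoteIp().GetAddressPrefix())
	fmt.Println(physical.GetDirectRemoteIp().GetAddressPrefix())
}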
Note: the pseudo-header :path + // includes the query and fragment string. Use the “url_path“ field if you + // want to match the URL path without the query and fragment string. + Header *v3.HeaderMatcher `protobuf:"bytes,6,opt,name=header,proto3,oneof"` +} + +type Principal_UrlPath struct { + // A URL path on the incoming HTTP request. Only available for HTTP. + UrlPath *v31.PathMatcher `protobuf:"bytes,9,opt,name=url_path,json=urlPath,proto3,oneof"` +} + +type Principal_Metadata struct { + // Metadata that describes additional information about the principal. + Metadata *v31.MetadataMatcher `protobuf:"bytes,7,opt,name=metadata,proto3,oneof"` +} + +type Principal_FilterState struct { + // Identifies the principal using a filter state object. + FilterState *v31.FilterStateMatcher `protobuf:"bytes,12,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +type Principal_NotId struct { + // Negates matching the provided principal. For instance, if the value of + // “not_id“ would match, this principal would not match. Conversely, if the + // value of “not_id“ would not match, this principal would match. + NotId *Principal `protobuf:"bytes,8,opt,name=not_id,json=notId,proto3,oneof"` +} + +func (*Principal_AndIds) isPrincipal_Identifier() {} + +func (*Principal_OrIds) isPrincipal_Identifier() {} + +func (*Principal_Any) isPrincipal_Identifier() {} + +func (*Principal_Authenticated_) isPrincipal_Identifier() {} + +func (*Principal_SourceIp) isPrincipal_Identifier() {} + +func (*Principal_DirectRemoteIp) isPrincipal_Identifier() {} + +func (*Principal_RemoteIp) isPrincipal_Identifier() {} + +func (*Principal_Header) isPrincipal_Identifier() {} + +func (*Principal_UrlPath) isPrincipal_Identifier() {} + +func (*Principal_Metadata) isPrincipal_Identifier() {} + +func (*Principal_FilterState) isPrincipal_Identifier() {} + +func (*Principal_NotId) isPrincipal_Identifier() {} + +// Action defines the result of allowance or denial when a request matches the matcher. +type Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name indicates the policy name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The action to take if the matcher matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // - “ALLOW“: If the request gets matched on ALLOW, it is permitted. + // - “DENY“: If the request gets matched on DENY, it is not permitted. + // - “LOG“: If the request gets matched on LOG, it is permitted. Besides, the + // dynamic metadata key “access_log_hint“ under the shared key namespace + // “envoy.common“ will be set to the value “true“. + // - If the request cannot get matched, it will fallback to “DENY“. + // + // Log behavior: + // + // If the RBAC matcher contains at least one LOG action, the dynamic + // metadata key ``access_log_hint`` will be set based on if the request + // get matched on the LOG action. 
+ Action RBAC_Action `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` +} + +func (x *Action) Reset() { + *x = Action{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Action) ProtoMessage() {} + +func (x *Action) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Action.ProtoReflect.Descriptor instead. +func (*Action) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{4} +} + +func (x *Action) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Action) GetAction() RBAC_Action { + if x != nil { + return x.Action + } + return RBAC_ALLOW +} + +type RBAC_AuditLoggingOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Condition for the audit logging to happen. + // If this condition is met, all the audit loggers configured here will be invoked. + // + // [#not-implemented-hide:] + AuditCondition RBAC_AuditLoggingOptions_AuditCondition `protobuf:"varint,1,opt,name=audit_condition,json=auditCondition,proto3,enum=envoy.config.rbac.v3.RBAC_AuditLoggingOptions_AuditCondition" json:"audit_condition,omitempty"` + // Configurations for RBAC-based authorization audit loggers. + // + // [#not-implemented-hide:] + LoggerConfigs []*RBAC_AuditLoggingOptions_AuditLoggerConfig `protobuf:"bytes,2,rep,name=logger_configs,json=loggerConfigs,proto3" json:"logger_configs,omitempty"` +} + +func (x *RBAC_AuditLoggingOptions) Reset() { + *x = RBAC_AuditLoggingOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC_AuditLoggingOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC_AuditLoggingOptions) ProtoMessage() {} + +func (x *RBAC_AuditLoggingOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions.ProtoReflect.Descriptor instead. +func (*RBAC_AuditLoggingOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RBAC_AuditLoggingOptions) GetAuditCondition() RBAC_AuditLoggingOptions_AuditCondition { + if x != nil { + return x.AuditCondition + } + return RBAC_AuditLoggingOptions_NONE +} + +func (x *RBAC_AuditLoggingOptions) GetLoggerConfigs() []*RBAC_AuditLoggingOptions_AuditLoggerConfig { + if x != nil { + return x.LoggerConfigs + } + return nil +} + +// [#not-implemented-hide:] +type RBAC_AuditLoggingOptions_AuditLoggerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Typed logger configuration. 
+	//
+	// [#extension-category: envoy.rbac.audit_loggers]
+	AuditLogger *v32.TypedExtensionConfig `protobuf:"bytes,1,opt,name=audit_logger,json=auditLogger,proto3" json:"audit_logger,omitempty"`
+	// If true, when the logger is not supported, the data plane will not NACK but simply ignore it.
+	IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"`
+}
+
+func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) Reset() {
+	*x = RBAC_AuditLoggingOptions_AuditLoggerConfig{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoMessage() {}
+
+func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RBAC_AuditLoggingOptions_AuditLoggerConfig.ProtoReflect.Descriptor instead.
+func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) Descriptor() ([]byte, []int) {
+	return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetAuditLogger() *v32.TypedExtensionConfig {
+	if x != nil {
+		return x.AuditLogger
+	}
+	return nil
+}
+
+func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetIsOptional() bool {
+	if x != nil {
+		return x.IsOptional
+	}
+	return false
+}
+
+// Used in the ``and_rules`` and ``or_rules`` fields in the ``rule`` oneof. Depending on the context,
+// each are applied with the associated behavior.
+type Permission_Set struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Rules []*Permission `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
+}
+
+func (x *Permission_Set) Reset() {
+	*x = Permission_Set{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Permission_Set) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Permission_Set) ProtoMessage() {}
+
+func (x *Permission_Set) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Permission_Set.ProtoReflect.Descriptor instead.
+func (*Permission_Set) Descriptor() ([]byte, []int) {
+	return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Permission_Set) GetRules() []*Permission {
+	if x != nil {
+		return x.Rules
+	}
+	return nil
+}
+
+// Used in the ``and_ids`` and ``or_ids`` fields in the ``identifier`` oneof.
+// Depending on the context, each are applied with the associated behavior.
+type Principal_Set struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []*Principal `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *Principal_Set) Reset() { + *x = Principal_Set{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal_Set) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal_Set) ProtoMessage() {} + +func (x *Principal_Set) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal_Set.ProtoReflect.Descriptor instead. +func (*Principal_Set) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Principal_Set) GetIds() []*Principal { + if x != nil { + return x.Ids + } + return nil +} + +// Authentication attributes for a downstream. +type Principal_Authenticated struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. + PrincipalName *v31.StringMatcher `protobuf:"bytes,2,opt,name=principal_name,json=principalName,proto3" json:"principal_name,omitempty"` +} + +func (x *Principal_Authenticated) Reset() { + *x = Principal_Authenticated{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal_Authenticated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal_Authenticated) ProtoMessage() {} + +func (x *Principal_Authenticated) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal_Authenticated.ProtoReflect.Descriptor instead. 
+func (*Principal_Authenticated) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Principal_Authenticated) GetPrincipalName() *v31.StringMatcher { + if x != nil { + return x.PrincipalName + } + return nil +} + +var File_envoy_config_rbac_v3_rbac_proto protoreflect.FileDescriptor + +var file_envoy_config_rbac_v3_rbac_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe1, 0x06, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x15, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc4, 0x03, 0x0a, 0x13, 0x41, 0x75, 0x64, 0x69, + 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x70, 0x0a, 0x0f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x67, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, + 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x83, 0x01, 0x0a, 0x11, 0x41, + 0x75, 0x64, 0x69, 0x74, 
0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x4d, 0x0a, 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0b, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x22, 0x4c, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x4e, 0x5f, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x1a, 0x59, + 0x0a, 0x0d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x26, 0x0a, 0x06, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x47, 0x10, + 0x02, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x42, 0x41, 0x43, 0x22, 0x93, 0x03, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4c, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, + 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, + 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 
0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x16, 0x12, 0x14, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, + 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, + 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xab, 0x08, 0x0a, 0x0a, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, + 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, + 0x08, 0x6f, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x3e, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, + 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x48, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x36, 0x0a, 0x10, 
0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x51, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x08, 0x6e, 0x6f, 0x74, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x07, 0x6e, 0x6f, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x0c, + 0x75, 0x72, 0x69, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x0b, 0x75, 0x72, 0x69, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x1a, 0x73, 0x0a, + 0x03, 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 
0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, + 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xeb, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, + 0x6e, 0x64, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, + 0x49, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, + 0x12, 0x55, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x70, 0x12, 0x4b, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 
0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, + 0x49, 0x64, 0x1a, 0x6d, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x03, 0x69, 0x64, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, + 0x74, 0x1a, 0x97, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 
0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, + 0x61, 0x6c, 0x42, 0x11, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, + 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, + 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_rbac_v3_rbac_proto_rawDescOnce sync.Once + file_envoy_config_rbac_v3_rbac_proto_rawDescData = file_envoy_config_rbac_v3_rbac_proto_rawDesc +) + +func file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP() []byte { + file_envoy_config_rbac_v3_rbac_proto_rawDescOnce.Do(func() { + file_envoy_config_rbac_v3_rbac_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_rbac_v3_rbac_proto_rawDescData) + }) + return file_envoy_config_rbac_v3_rbac_proto_rawDescData +} + +var file_envoy_config_rbac_v3_rbac_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_envoy_config_rbac_v3_rbac_proto_goTypes = []interface{}{ + (RBAC_Action)(0), // 0: envoy.config.rbac.v3.RBAC.Action + (RBAC_AuditLoggingOptions_AuditCondition)(0), // 1: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + (*RBAC)(nil), // 2: envoy.config.rbac.v3.RBAC + (*Policy)(nil), // 3: envoy.config.rbac.v3.Policy + (*Permission)(nil), // 4: envoy.config.rbac.v3.Permission + (*Principal)(nil), // 5: envoy.config.rbac.v3.Principal + (*Action)(nil), // 6: envoy.config.rbac.v3.Action + (*RBAC_AuditLoggingOptions)(nil), // 7: envoy.config.rbac.v3.RBAC.AuditLoggingOptions + nil, // 8: envoy.config.rbac.v3.RBAC.PoliciesEntry + (*RBAC_AuditLoggingOptions_AuditLoggerConfig)(nil), // 9: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + (*Permission_Set)(nil), // 10: envoy.config.rbac.v3.Permission.Set + (*Principal_Set)(nil), // 11: envoy.config.rbac.v3.Principal.Set + (*Principal_Authenticated)(nil), // 12: 
envoy.config.rbac.v3.Principal.Authenticated + (*v1alpha1.Expr)(nil), // 13: google.api.expr.v1alpha1.Expr + (*v1alpha1.CheckedExpr)(nil), // 14: google.api.expr.v1alpha1.CheckedExpr + (*v3.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*v31.PathMatcher)(nil), // 16: envoy.type.matcher.v3.PathMatcher + (*v32.CidrRange)(nil), // 17: envoy.config.core.v3.CidrRange + (*v33.Int32Range)(nil), // 18: envoy.type.v3.Int32Range + (*v31.MetadataMatcher)(nil), // 19: envoy.type.matcher.v3.MetadataMatcher + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (*v32.TypedExtensionConfig)(nil), // 21: envoy.config.core.v3.TypedExtensionConfig + (*v31.FilterStateMatcher)(nil), // 22: envoy.type.matcher.v3.FilterStateMatcher +} +var file_envoy_config_rbac_v3_rbac_proto_depIdxs = []int32{ + 0, // 0: envoy.config.rbac.v3.RBAC.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 8, // 1: envoy.config.rbac.v3.RBAC.policies:type_name -> envoy.config.rbac.v3.RBAC.PoliciesEntry + 7, // 2: envoy.config.rbac.v3.RBAC.audit_logging_options:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions + 4, // 3: envoy.config.rbac.v3.Policy.permissions:type_name -> envoy.config.rbac.v3.Permission + 5, // 4: envoy.config.rbac.v3.Policy.principals:type_name -> envoy.config.rbac.v3.Principal + 13, // 5: envoy.config.rbac.v3.Policy.condition:type_name -> google.api.expr.v1alpha1.Expr + 14, // 6: envoy.config.rbac.v3.Policy.checked_condition:type_name -> google.api.expr.v1alpha1.CheckedExpr + 10, // 7: envoy.config.rbac.v3.Permission.and_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 10, // 8: envoy.config.rbac.v3.Permission.or_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 15, // 9: envoy.config.rbac.v3.Permission.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 10: envoy.config.rbac.v3.Permission.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 17, // 11: envoy.config.rbac.v3.Permission.destination_ip:type_name -> envoy.config.core.v3.CidrRange + 18, // 12: envoy.config.rbac.v3.Permission.destination_port_range:type_name -> envoy.type.v3.Int32Range + 19, // 13: envoy.config.rbac.v3.Permission.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 4, // 14: envoy.config.rbac.v3.Permission.not_rule:type_name -> envoy.config.rbac.v3.Permission + 20, // 15: envoy.config.rbac.v3.Permission.requested_server_name:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 16: envoy.config.rbac.v3.Permission.matcher:type_name -> envoy.config.core.v3.TypedExtensionConfig + 21, // 17: envoy.config.rbac.v3.Permission.uri_template:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 18: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 11, // 19: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 12, // 20: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated + 17, // 21: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 22: envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 23: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange + 15, // 24: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 25: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 19, // 26: envoy.config.rbac.v3.Principal.metadata:type_name 
-> envoy.type.matcher.v3.MetadataMatcher + 22, // 27: envoy.config.rbac.v3.Principal.filter_state:type_name -> envoy.type.matcher.v3.FilterStateMatcher + 5, // 28: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal + 0, // 29: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 1, // 30: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.audit_condition:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + 9, // 31: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.logger_configs:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + 3, // 32: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy + 21, // 33: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig.audit_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 4, // 34: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission + 5, // 35: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal + 20, // 36: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name +} + +func init() { file_envoy_config_rbac_v3_rbac_proto_init() } +func file_envoy_config_rbac_v3_rbac_proto_init() { + if File_envoy_config_rbac_v3_rbac_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_rbac_v3_rbac_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Permission); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC_AuditLoggingOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC_AuditLoggingOptions_AuditLoggerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Permission_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal_Authenticated); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Permission_AndRules)(nil), + (*Permission_OrRules)(nil), + (*Permission_Any)(nil), + (*Permission_Header)(nil), + (*Permission_UrlPath)(nil), + (*Permission_DestinationIp)(nil), + (*Permission_DestinationPort)(nil), + (*Permission_DestinationPortRange)(nil), + (*Permission_Metadata)(nil), + (*Permission_NotRule)(nil), + (*Permission_RequestedServerName)(nil), + (*Permission_Matcher)(nil), + (*Permission_UriTemplate)(nil), + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Principal_AndIds)(nil), + (*Principal_OrIds)(nil), + (*Principal_Any)(nil), + (*Principal_Authenticated_)(nil), + (*Principal_SourceIp)(nil), + (*Principal_DirectRemoteIp)(nil), + (*Principal_RemoteIp)(nil), + (*Principal_Header)(nil), + (*Principal_UrlPath)(nil), + (*Principal_Metadata)(nil), + (*Principal_FilterState)(nil), + (*Principal_NotId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_rbac_v3_rbac_proto_rawDesc, + NumEnums: 2, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_rbac_v3_rbac_proto_goTypes, + DependencyIndexes: file_envoy_config_rbac_v3_rbac_proto_depIdxs, + EnumInfos: file_envoy_config_rbac_v3_rbac_proto_enumTypes, + MessageInfos: file_envoy_config_rbac_v3_rbac_proto_msgTypes, + }.Build() + File_envoy_config_rbac_v3_rbac_proto = out.File + file_envoy_config_rbac_v3_rbac_proto_rawDesc = nil + file_envoy_config_rbac_v3_rbac_proto_goTypes = nil + file_envoy_config_rbac_v3_rbac_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go new file mode 100644 index 000000000..f80fd6097 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go @@ -0,0 +1,2509 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RBAC with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *RBAC) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RBACMultiError, or nil if none found. +func (m *RBAC) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RBAC_Action_name[int32(m.GetAction())]; !ok { + err := RBACValidationError{ + field: "Action", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetPolicies())) + i := 0 + for key := range m.GetPolicies() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetPolicies()[key] + _ = val + + // no validation rules for Policies[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetAuditLoggingOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLoggingOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RBACMultiError(errors) + } + + return nil +} + +// 
RBACMultiError is an error wrapping multiple validation errors returned by +// RBAC.ValidateAll() if the designated constraints aren't met. +type RBACMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACMultiError) AllErrors() []error { return m } + +// RBACValidationError is the validation error returned by RBAC.Validate if the +// designated constraints aren't met. +type RBACValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACValidationError) ErrorName() string { return "RBACValidationError" } + +// Error satisfies the builtin error interface +func (e RBACValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACValidationError{} + +// Validate checks the field values on Policy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Policy with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PolicyMultiError, or nil if none found. 
+func (m *Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPermissions()) < 1 { + err := PolicyValidationError{ + field: "Permissions", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPermissions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(m.GetPrincipals()) < 1 { + err := PolicyValidationError{ + field: "Principals", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPrincipals() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetCondition()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCondition()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCheckedCondition()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCheckedCondition()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return PolicyMultiError(errors) + } + + return nil +} + +// PolicyMultiError is an error wrapping multiple validation errors returned by +// Policy.ValidateAll() if the designated constraints aren't met. +type PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PolicyMultiError) AllErrors() []error { return m } + +// PolicyValidationError is the validation error returned by Policy.Validate if +// the designated constraints aren't met. +type PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PolicyValidationError) ErrorName() string { return "PolicyValidationError" } + +// Error satisfies the builtin error interface +func (e PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PolicyValidationError{} + +// Validate checks the field values on Permission with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Permission) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Permission with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PermissionMultiError, or +// nil if none found. 
+func (m *Permission) ValidateAll() error { + return m.validate(true) +} + +func (m *Permission) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *Permission_AndRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_OrRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Any: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetAny() != true { + err := PermissionValidationError{ + field: "Any", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Permission_Header: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_UrlPath: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetUrlPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUrlPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_DestinationIp: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_DestinationPort: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetDestinationPort() > 65535 { + err := PermissionValidationError{ + field: "DestinationPort", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Permission_DestinationPortRange: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationPortRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationPortRange()).(interface{ 
Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Metadata: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_NotRule: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotRule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotRule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_RequestedServerName: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetRequestedServerName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestedServerName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Matcher: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent 
= true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_UriTemplate: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetUriTemplate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUriTemplate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := PermissionValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PermissionMultiError(errors) + } + + return nil +} + +// PermissionMultiError is an error wrapping multiple validation errors +// returned by Permission.ValidateAll() if the designated constraints aren't met. +type PermissionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PermissionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PermissionMultiError) AllErrors() []error { return m } + +// PermissionValidationError is the validation error returned by +// Permission.Validate if the designated constraints aren't met. +type PermissionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PermissionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PermissionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PermissionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PermissionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
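For reviewers tracing the generated oneof constraints above, here is a minimal sketch of how Permission.Validate behaves for an unset versus a satisfied Rule oneof; the main function and sample values are illustrative only, while the import path matches the vendored package:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// An empty Permission trips the oneof check generated above:
	// "invalid Permission.Rule: value is required".
	p := &rbacv3.Permission{}
	fmt.Println(p.Validate())

	// Permission_Any additionally requires the wrapped bool to equal true.
	p.Rule = &rbacv3.Permission_Any{Any: true}
	fmt.Println(p.Validate()) // <nil>
}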
+func (e PermissionValidationError) ErrorName() string { return "PermissionValidationError" } + +// Error satisfies the builtin error interface +func (e PermissionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPermission.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PermissionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PermissionValidationError{} + +// Validate checks the field values on Principal with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Principal) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PrincipalMultiError, or nil +// if none found. +func (m *Principal) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofIdentifierPresent := false + switch v := m.Identifier.(type) { + case *Principal_AndIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetAndIds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndIds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_OrIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetOrIds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrIds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Any: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if m.GetAny() != true { + err := PrincipalValidationError{ + field: "Any", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Principal_Authenticated_: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetAuthenticated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuthenticated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_SourceIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetSourceIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_DirectRemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetDirectRemoteIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDirectRemoteIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_RemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: 
"Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetRemoteIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Header: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_UrlPath: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetUrlPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUrlPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Metadata: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if 
err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_FilterState: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_NotId: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetNotId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofIdentifierPresent { + err := PrincipalValidationError{ + field: "Identifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PrincipalMultiError(errors) + } + + return nil +} + +// PrincipalMultiError is an error wrapping multiple validation errors returned +// by Principal.ValidateAll() if the designated constraints aren't met. +type PrincipalMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PrincipalMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PrincipalMultiError) AllErrors() []error { return m } + +// PrincipalValidationError is the validation error returned by +// Principal.Validate if the designated constraints aren't met. 
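Validate stops at the first violation, while ValidateAll collects every violation into a PrincipalMultiError; a small, illustrative sketch of unwrapping that slice type (the caller code is hypothetical):

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	id := &rbacv3.Principal{} // Identifier oneof left unset
	if err := id.ValidateAll(); err != nil {
		// ValidateAll returns the concrete PrincipalMultiError slice type,
		// so a plain type assertion is enough to reach AllErrors.
		if multi, ok := err.(rbacv3.PrincipalMultiError); ok {
			for _, violation := range multi.AllErrors() {
				fmt.Println(violation)
			}
		}
	}
}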
+type PrincipalValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PrincipalValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PrincipalValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PrincipalValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PrincipalValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PrincipalValidationError) ErrorName() string { return "PrincipalValidationError" } + +// Error satisfies the builtin error interface +func (e PrincipalValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PrincipalValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PrincipalValidationError{} + +// Validate checks the field values on Action with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Action) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Action with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in ActionMultiError, or nil if none found. +func (m *Action) ValidateAll() error { + return m.validate(true) +} + +func (m *Action) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ActionValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Action + + if len(errors) > 0 { + return ActionMultiError(errors) + } + + return nil +} + +// ActionMultiError is an error wrapping multiple validation errors returned by +// Action.ValidateAll() if the designated constraints aren't met. +type ActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActionMultiError) AllErrors() []error { return m } + +// ActionValidationError is the validation error returned by Action.Validate if +// the designated constraints aren't met. +type ActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
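The Action message above only constrains Name (at least one rune); a short sketch of assumed usage:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	a := &rbacv3.Action{}     // Name left empty
	fmt.Println(a.Validate()) // invalid Action.Name: value length must be at least 1 runes

	a.Name = "deny-unauthenticated" // sample value, at least 1 rune
	fmt.Println(a.Validate())       // <nil>
}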
+func (e ActionValidationError) ErrorName() string { return "ActionValidationError" } + +// Error satisfies the builtin error interface +func (e ActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActionValidationError{} + +// Validate checks the field values on RBAC_AuditLoggingOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC_AuditLoggingOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RBAC_AuditLoggingOptionsMultiError, or nil if none found. +func (m *RBAC_AuditLoggingOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RBAC_AuditLoggingOptions_AuditCondition_name[int32(m.GetAuditCondition())]; !ok { + err := RBAC_AuditLoggingOptionsValidationError{ + field: "AuditCondition", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetLoggerConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RBAC_AuditLoggingOptionsMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptionsMultiError is an error wrapping multiple validation +// errors returned by RBAC_AuditLoggingOptions.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
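RBAC_AuditLoggingOptions.validate above rejects AuditCondition values outside the defined enum and revalidates each LoggerConfigs entry (reported as LoggerConfigs[idx]); a sketch under the assumption that the generated enum constant follows the usual RBAC_AuditLoggingOptions_ON_DENY naming:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// An out-of-range enum value fails the defined-enum-values rule.
	opts := &rbacv3.RBAC_AuditLoggingOptions{
		AuditCondition: rbacv3.RBAC_AuditLoggingOptions_AuditCondition(42),
	}
	fmt.Println(opts.Validate())

	// A defined value passes; LoggerConfigs entries would then be
	// validated one by one.
	opts.AuditCondition = rbacv3.RBAC_AuditLoggingOptions_ON_DENY // assumed constant name
	fmt.Println(opts.Validate())                                  // <nil>
}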
+func (m RBAC_AuditLoggingOptionsMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptionsValidationError is the validation error returned by +// RBAC_AuditLoggingOptions.Validate if the designated constraints aren't met. +type RBAC_AuditLoggingOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBAC_AuditLoggingOptionsValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptionsValidationError{} + +// Validate checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError, or nil if none found. 
+func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAuditLogger()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLogger()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + if len(errors) > 0 { + return RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError is an error wrapping +// multiple validation errors returned by +// RBAC_AuditLoggingOptions_AuditLoggerConfig.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError is the validation +// error returned by RBAC_AuditLoggingOptions_AuditLoggerConfig.Validate if +// the designated constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions_AuditLoggerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + +// Validate checks the field values on Permission_Set with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Permission_Set) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Permission_Set with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Permission_SetMultiError, +// or nil if none found. +func (m *Permission_Set) ValidateAll() error { + return m.validate(true) +} + +func (m *Permission_Set) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 1 { + err := Permission_SetValidationError{ + field: "Rules", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Permission_SetMultiError(errors) + } + + return nil +} + +// Permission_SetMultiError is an error wrapping multiple validation errors +// returned by Permission_Set.ValidateAll() if the designated constraints +// aren't met. +type Permission_SetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Permission_SetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Permission_SetMultiError) AllErrors() []error { return m } + +// Permission_SetValidationError is the validation error returned by +// Permission_Set.Validate if the designated constraints aren't met. 
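Permission_Set.validate above enforces at least one entry in Rules and revalidates each entry, wiring any nested failure into Cause(); an illustrative sketch of walking that chain:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// Rules must contain at least one item.
	set := &rbacv3.Permission_Set{}
	fmt.Println(set.Validate())

	// Each item is validated recursively; the inner failure surfaces
	// through the outer error's Cause().
	set.Rules = []*rbacv3.Permission{{}} // empty Permission: Rule oneof unset
	if err := set.Validate(); err != nil {
		if verr, ok := err.(rbacv3.Permission_SetValidationError); ok {
			fmt.Println(verr.Field(), "->", verr.Cause()) // Rules[0] -> invalid Permission.Rule: ...
		}
	}
}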
+type Permission_SetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Permission_SetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Permission_SetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Permission_SetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Permission_SetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Permission_SetValidationError) ErrorName() string { return "Permission_SetValidationError" } + +// Error satisfies the builtin error interface +func (e Permission_SetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPermission_Set.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Permission_SetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Permission_SetValidationError{} + +// Validate checks the field values on Principal_Set with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Principal_Set) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal_Set with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Principal_SetMultiError, or +// nil if none found. +func (m *Principal_Set) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal_Set) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetIds()) < 1 { + err := Principal_SetValidationError{ + field: "Ids", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetIds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Principal_SetMultiError(errors) + } + + return nil +} + +// Principal_SetMultiError is an error wrapping multiple validation errors +// returned by Principal_Set.ValidateAll() if the designated constraints +// aren't met. +type Principal_SetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Principal_SetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Principal_SetMultiError) AllErrors() []error { return m } + +// Principal_SetValidationError is the validation error returned by +// Principal_Set.Validate if the designated constraints aren't met. +type Principal_SetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Principal_SetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Principal_SetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Principal_SetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Principal_SetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Principal_SetValidationError) ErrorName() string { return "Principal_SetValidationError" } + +// Error satisfies the builtin error interface +func (e Principal_SetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal_Set.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Principal_SetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Principal_SetValidationError{} + +// Validate checks the field values on Principal_Authenticated with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Principal_Authenticated) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal_Authenticated with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Principal_AuthenticatedMultiError, or nil if none found. 
+func (m *Principal_Authenticated) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal_Authenticated) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPrincipalName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrincipalName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Principal_AuthenticatedMultiError(errors) + } + + return nil +} + +// Principal_AuthenticatedMultiError is an error wrapping multiple validation +// errors returned by Principal_Authenticated.ValidateAll() if the designated +// constraints aren't met. +type Principal_AuthenticatedMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Principal_AuthenticatedMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Principal_AuthenticatedMultiError) AllErrors() []error { return m } + +// Principal_AuthenticatedValidationError is the validation error returned by +// Principal_Authenticated.Validate if the designated constraints aren't met. +type Principal_AuthenticatedValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Principal_AuthenticatedValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Principal_AuthenticatedValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Principal_AuthenticatedValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Principal_AuthenticatedValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
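Beyond the validators, this diff also vendors a new rbac_vtproto.pb.go (its hunk starts just below) that adds strict marshallers behind the vtprotobuf build tag; a minimal sketch of calling one, assuming the binary is built with -tags vtprotobuf:

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
)

func main() {
	// MarshalVTStrict sizes the buffer up front and fills it back to front;
	// the output is ordinary protobuf wire format.
	msg := &rbacv3.RBAC{Action: rbacv3.RBAC_DENY}
	data, err := msg.MarshalVTStrict() // only compiled with the vtprotobuf tag
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded RBAC config: %d bytes\n", len(data))
}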
+func (e Principal_AuthenticatedValidationError) ErrorName() string { + return "Principal_AuthenticatedValidationError" +} + +// Error satisfies the builtin error interface +func (e Principal_AuthenticatedValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal_Authenticated.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Principal_AuthenticatedValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Principal_AuthenticatedValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go new file mode 100644 index 000000000..940a9b37e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go @@ -0,0 +1,2103 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AuditLogger != nil { + if vtmsg, ok := interface{}(m.AuditLogger).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AuditLogger) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoggerConfigs) > 0 { + for iNdEx := len(m.LoggerConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoggerConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.AuditCondition != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AuditCondition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AuditLoggingOptions != nil { + size, err := m.AuditLoggingOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Policies) > 0 { + for k := range m.Policies { + v := m.Policies[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CheckedCondition != nil { + if vtmsg, ok := interface{}(m.CheckedCondition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CheckedCondition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if 
m.Condition != nil { + if vtmsg, ok := interface{}(m.Condition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Condition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Principals) > 0 { + for iNdEx := len(m.Principals) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Principals[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Permissions) > 0 { + for iNdEx := len(m.Permissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Permissions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Permission_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission_Set) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Permission) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*Permission_UriTemplate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.Rule.(*Permission_RequestedServerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_NotRule); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_OrRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_AndRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Permission_AndRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_AndRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndRules != nil { + size, err := m.AndRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Permission_OrRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_OrRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrRules != nil { + size, err := m.OrRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Permission_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Permission_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationIp != nil { + if vtmsg, ok := interface{}(m.DestinationIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DestinationPort)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Permission_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Permission_NotRule) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_NotRule) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotRule != nil { + size, err := m.NotRule.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Permission_RequestedServerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_RequestedServerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestedServerName != nil { + 
if vtmsg, ok := interface{}(m.RequestedServerName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestedServerName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Permission_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Permission_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Permission_UriTemplate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UriTemplate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UriTemplate != nil { + if vtmsg, ok := interface{}(m.UriTemplate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UriTemplate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Principal_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Set) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Ids[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Principal_Authenticated) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Authenticated) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrincipalName != nil { + if vtmsg, ok := interface{}(m.PrincipalName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrincipalName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *Principal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func 
(m *Principal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Identifier.(*Principal_FilterState); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_RemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_DirectRemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_NotId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_SourceIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Authenticated_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_OrIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_AndIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Principal_AndIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_AndIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndIds != nil { + size, err := m.AndIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Principal_OrIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_OrIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrIds != nil { + size, err := m.OrIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Principal_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Principal_Authenticated_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Authenticated != nil { + size, err := m.Authenticated.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Principal_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceIp != nil { + if vtmsg, ok := interface{}(m.SourceIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourceIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Principal_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Principal_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) 
+ i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Principal_NotId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_NotId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotId != nil { + size, err := m.NotId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Principal_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Principal_DirectRemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_DirectRemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectRemoteIp != nil { + if vtmsg, ok := interface{}(m.DirectRemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectRemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Principal_RemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_RemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteIp != nil { + if vtmsg, ok := interface{}(m.RemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Principal_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Principal_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + if vtmsg, ok := interface{}(m.FilterState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditLogger != nil { + if size, ok := interface{}(m.AuditLogger).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AuditLogger) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC_AuditLoggingOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditCondition != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AuditCondition)) + } + if len(m.LoggerConfigs) > 0 { + for _, e := range m.LoggerConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + if len(m.Policies) > 0 { + for k, v := range m.Policies { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.AuditLoggingOptions != nil { + l = m.AuditLoggingOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Permissions) > 0 { + for _, e := range m.Permissions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Principals) > 0 { + for _, e := range m.Principals { + l = e.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Condition != nil { + if size, ok := interface{}(m.Condition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Condition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CheckedCondition != nil { + if size, ok := interface{}(m.CheckedCondition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CheckedCondition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Permission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_AndRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndRules != nil { + l = m.AndRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_OrRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrRules != nil { + l = m.OrRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Permission_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationIp != nil { + if size, ok := interface{}(m.DestinationIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.DestinationPort)) + return n +} +func (m *Permission_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_NotRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotRule != nil { + l = m.NotRule.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_RequestedServerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestedServerName != nil { + if size, ok := interface{}(m.RequestedServerName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RequestedServerName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + 
n += 2 + } + return n +} +func (m *Permission_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_UriTemplate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UriTemplate != nil { + if size, ok := interface{}(m.UriTemplate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UriTemplate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + for _, e := range m.Ids { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_Authenticated) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrincipalName != nil { + if size, ok := interface{}(m.PrincipalName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrincipalName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Principal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Identifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_AndIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndIds != nil { + l = m.AndIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_OrIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrIds != nil { + l = m.OrIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Principal_Authenticated_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Authenticated != nil { + l = m.Authenticated.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + if size, ok := interface{}(m.SourceIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SourceIp) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_NotId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotId != nil { + l = m.NotId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_DirectRemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectRemoteIp != nil { + if size, ok := interface{}(m.DirectRemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DirectRemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_RemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteIp != nil { + if size, ok := interface{}(m.RemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + if size, ok := interface{}(m.FilterState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go new file mode 100644 index 000000000..a34106595 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go @@ -0,0 +1,573 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 18] +type RouteConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // An array of virtual hosts that make up the route table. + VirtualHosts []*VirtualHost `protobuf:"bytes,2,rep,name=virtual_hosts,json=virtualHosts,proto3" json:"virtual_hosts,omitempty"` + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both “virtual_hosts“ and “vhds“ fields will be used when present. “virtual_hosts“ can be used + // for a base routing table or for infrequently changing virtual hosts. “vhds“ is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with “vhds“ derived configuration + // taking precedence. + Vhds *Vhds `protobuf:"bytes,9,opt,name=vhds,proto3" json:"vhds,omitempty"` + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + InternalOnlyHeaders []string `protobuf:"bytes,3,rep,name=internal_only_headers,json=internalOnlyHeaders,proto3" json:"internal_only_headers,omitempty"` + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,4,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + ResponseHeadersToRemove []string `protobuf:"bytes,5,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. 
Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Headers mutations at all levels are evaluated, if specified. By default, the order is from most + // specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header + // mutations may override earlier mutations. + // This order can be reversed by setting this field to true. In other words, most specific level mutation + // is evaluated last. + MostSpecificHeaderMutationsWins bool `protobuf:"varint,10,opt,name=most_specific_header_mutations_wins,json=mostSpecificHeaderMutationsWins,proto3" json:"most_specific_header_mutations_wins,omitempty"` + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + ValidateClusters *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=validate_clusters,json=validateClusters,proto3" json:"validate_clusters,omitempty"` + // The maximum bytes of the response :ref:`direct response body + // ` size. If not specified the default + // is 4096. + // + // .. warning:: + // + // Envoy currently holds the content of :ref:`direct response body + // ` in memory. Be careful setting + // this to be larger than the default 4KB, since the allocated memory for direct response body + // is not subject to data plane buffering controls. + MaxDirectResponseBodySizeBytes *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"` + // A list of plugins and their configurations which may be used by a + // :ref:`cluster specifier plugin name ` + // within the route. All “extension.name“ fields in this list must be unique. + ClusterSpecifierPlugins []*ClusterSpecifierPlugin `protobuf:"bytes,12,rep,name=cluster_specifier_plugins,json=clusterSpecifierPlugins,proto3" json:"cluster_specifier_plugins,omitempty"` + // Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. + // Note that policies are not merged, the most specific non-empty one becomes the mirror policies. 
+ RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,13,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // By default, port in :authority header (if any) is used in host matching. + // With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost. + // NOTE: this option will not strip the port number (if any) contained in route config + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field. + IgnorePortInHostMatching bool `protobuf:"varint,14,opt,name=ignore_port_in_host_matching,json=ignorePortInHostMatching,proto3" json:"ignore_port_in_host_matching,omitempty"` + // Ignore path-parameters in path-matching. + // Before RFC3986, URI were like(RFC1808): :///;?# + // Envoy by default takes ":path" as ";". + // For users who want to only match path on the "" portion, this option should be true. + IgnorePathParametersInPathMatching bool `protobuf:"varint,15,opt,name=ignore_path_parameters_in_path_matching,json=ignorePathParametersInPathMatching,proto3" json:"ignore_path_parameters_in_path_matching,omitempty"` + // This field can be used to provide RouteConfiguration level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The metadata field can be used to provide additional information + // about the route configuration. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. + Metadata *v3.Metadata `protobuf:"bytes,17,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *RouteConfiguration) Reset() { + *x = RouteConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteConfiguration) ProtoMessage() {} + +func (x *RouteConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteConfiguration.ProtoReflect.Descriptor instead. 
+func (*RouteConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{0} +} + +func (x *RouteConfiguration) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteConfiguration) GetVirtualHosts() []*VirtualHost { + if x != nil { + return x.VirtualHosts + } + return nil +} + +func (x *RouteConfiguration) GetVhds() *Vhds { + if x != nil { + return x.Vhds + } + return nil +} + +func (x *RouteConfiguration) GetInternalOnlyHeaders() []string { + if x != nil { + return x.InternalOnlyHeaders + } + return nil +} + +func (x *RouteConfiguration) GetResponseHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *RouteConfiguration) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *RouteConfiguration) GetRequestHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *RouteConfiguration) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *RouteConfiguration) GetMostSpecificHeaderMutationsWins() bool { + if x != nil { + return x.MostSpecificHeaderMutationsWins + } + return false +} + +func (x *RouteConfiguration) GetValidateClusters() *wrapperspb.BoolValue { + if x != nil { + return x.ValidateClusters + } + return nil +} + +func (x *RouteConfiguration) GetMaxDirectResponseBodySizeBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxDirectResponseBodySizeBytes + } + return nil +} + +func (x *RouteConfiguration) GetClusterSpecifierPlugins() []*ClusterSpecifierPlugin { + if x != nil { + return x.ClusterSpecifierPlugins + } + return nil +} + +func (x *RouteConfiguration) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *RouteConfiguration) GetIgnorePortInHostMatching() bool { + if x != nil { + return x.IgnorePortInHostMatching + } + return false +} + +func (x *RouteConfiguration) GetIgnorePathParametersInPathMatching() bool { + if x != nil { + return x.IgnorePathParametersInPathMatching + } + return false +} + +func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *RouteConfiguration) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Vhds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for VHDS. 
+ ConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` +} + +func (x *Vhds) Reset() { + *x = Vhds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Vhds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Vhds) ProtoMessage() {} + +func (x *Vhds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Vhds.ProtoReflect.Descriptor instead. +func (*Vhds) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{1} +} + +func (x *Vhds) GetConfigSource() *v3.ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +var File_envoy_config_route_v3_route_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_route_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x0c, 0x0a, 0x12, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x76, 0x69, 0x72, 
0x74, 0x75, 0x61, 0x6c, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x0c, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x2f, 0x0a, + 0x04, 0x76, 0x68, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x52, 0x04, 0x76, 0x68, 0x64, 0x73, 0x12, 0x44, + 0x0a, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, + 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, + 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, + 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, + 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, + 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 
0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x57, 0x69, + 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x23, 0x6d, + 0x61, 0x78, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x53, 0x69, 0x7a, + 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x19, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x17, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, + 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x69, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x12, 0x53, 0x0a, 0x27, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x22, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x74, 0x68, 0x50, 0x61, + 0x72, 0x61, 0x6d, 
0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5d, + 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x26, 0x9a, + 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x73, 0x0a, 0x04, 0x56, 0x68, 0x64, 0x73, 0x12, 0x51, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x3a, 0x18, 0x9a, 0xc5, 0x88, 0x1e, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 
0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_route_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_route_proto_rawDescData = file_envoy_config_route_v3_route_proto_rawDesc +) + +func file_envoy_config_route_v3_route_proto_rawDescGZIP() []byte { + file_envoy_config_route_v3_route_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_route_proto_rawDescData) + }) + return file_envoy_config_route_v3_route_proto_rawDescData +} + +var file_envoy_config_route_v3_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_route_v3_route_proto_goTypes = []interface{}{ + (*RouteConfiguration)(nil), // 0: envoy.config.route.v3.RouteConfiguration + (*Vhds)(nil), // 1: envoy.config.route.v3.Vhds + nil, // 2: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + (*VirtualHost)(nil), // 3: envoy.config.route.v3.VirtualHost + (*v3.HeaderValueOption)(nil), // 4: envoy.config.core.v3.HeaderValueOption + (*wrapperspb.BoolValue)(nil), // 5: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*ClusterSpecifierPlugin)(nil), // 7: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteAction_RequestMirrorPolicy)(nil), // 8: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*v3.Metadata)(nil), // 9: envoy.config.core.v3.Metadata + (*v3.ConfigSource)(nil), // 10: envoy.config.core.v3.ConfigSource + (*anypb.Any)(nil), // 11: google.protobuf.Any +} +var file_envoy_config_route_v3_route_proto_depIdxs = []int32{ + 3, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost + 1, // 1: envoy.config.route.v3.RouteConfiguration.vhds:type_name -> envoy.config.route.v3.Vhds + 4, // 2: envoy.config.route.v3.RouteConfiguration.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 4, // 3: envoy.config.route.v3.RouteConfiguration.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 5, // 4: envoy.config.route.v3.RouteConfiguration.validate_clusters:type_name -> google.protobuf.BoolValue + 6, // 5: envoy.config.route.v3.RouteConfiguration.max_direct_response_body_size_bytes:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.config.route.v3.RouteConfiguration.cluster_specifier_plugins:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin + 8, // 7: envoy.config.route.v3.RouteConfiguration.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 2, // 8: envoy.config.route.v3.RouteConfiguration.typed_per_filter_config:type_name -> envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + 9, // 9: envoy.config.route.v3.RouteConfiguration.metadata:type_name -> envoy.config.core.v3.Metadata + 10, // 10: envoy.config.route.v3.Vhds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 11, // 11: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_route_proto_init() } +func file_envoy_config_route_v3_route_proto_init() { + if 
File_envoy_config_route_v3_route_proto != nil { + return + } + file_envoy_config_route_v3_route_components_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Vhds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_route_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_route_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_route_proto_depIdxs, + MessageInfos: file_envoy_config_route_v3_route_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_route_proto = out.File + file_envoy_config_route_v3_route_proto_rawDesc = nil + file_envoy_config_route_v3_route_proto_goTypes = nil + file_envoy_config_route_v3_route_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go new file mode 100644 index 000000000..be062e5bd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go @@ -0,0 +1,693 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteConfigurationMultiError, or nil if none found. 
+func (m *RouteConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + for idx, item := range m.GetVirtualHosts() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetVhds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVhds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetInternalOnlyHeaders() { + _, _ = idx, item + + if !_RouteConfiguration_InternalOnlyHeaders_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("InternalOnlyHeaders[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := RouteConfigurationValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + 
+ for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if !_RouteConfiguration_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := RouteConfigurationValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_RouteConfiguration_RequestHeadersToRemove_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for MostSpecificHeaderMutationsWins + + if all { + switch v := interface{}(m.GetValidateClusters()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidateClusters()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxDirectResponseBodySizeBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetMaxDirectResponseBodySizeBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetClusterSpecifierPlugins() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for IgnorePortInHostMatching + + // no validation rules for IgnorePathParametersInPathMatching + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", 
key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteConfigurationMultiError(errors) + } + + return nil +} + +// RouteConfigurationMultiError is an error wrapping multiple validation errors +// returned by RouteConfiguration.ValidateAll() if the designated constraints +// aren't met. +type RouteConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteConfigurationMultiError) AllErrors() []error { return m } + +// RouteConfigurationValidationError is the validation error returned by +// RouteConfiguration.Validate if the designated constraints aren't met. +type RouteConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteConfigurationValidationError) ErrorName() string { + return "RouteConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteConfigurationValidationError{} + +var _RouteConfiguration_InternalOnlyHeaders_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteConfiguration_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteConfiguration_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on Vhds with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. 
+func (m *Vhds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Vhds with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in VhdsMultiError, or nil if none found. +func (m *Vhds) ValidateAll() error { + return m.validate(true) +} + +func (m *Vhds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := VhdsValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return VhdsMultiError(errors) + } + + return nil +} + +// VhdsMultiError is an error wrapping multiple validation errors returned by +// Vhds.ValidateAll() if the designated constraints aren't met. +type VhdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VhdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VhdsMultiError) AllErrors() []error { return m } + +// VhdsValidationError is the validation error returned by Vhds.Validate if the +// designated constraints aren't met. +type VhdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VhdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VhdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VhdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VhdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e VhdsValidationError) ErrorName() string { return "VhdsValidationError" } + +// Error satisfies the builtin error interface +func (e VhdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVhds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VhdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VhdsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go new file mode 100644 index 000000000..bd5ae667f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go @@ -0,0 +1,9078 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/cncf/xds/go/xds/type/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VirtualHost_TlsRequirementType int32 + +const ( + // No TLS requirement for the virtual host. + VirtualHost_NONE VirtualHost_TlsRequirementType = 0 + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + VirtualHost_EXTERNAL_ONLY VirtualHost_TlsRequirementType = 1 + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + VirtualHost_ALL VirtualHost_TlsRequirementType = 2 +) + +// Enum value maps for VirtualHost_TlsRequirementType. 
+var ( + VirtualHost_TlsRequirementType_name = map[int32]string{ + 0: "NONE", + 1: "EXTERNAL_ONLY", + 2: "ALL", + } + VirtualHost_TlsRequirementType_value = map[string]int32{ + "NONE": 0, + "EXTERNAL_ONLY": 1, + "ALL": 2, + } +) + +func (x VirtualHost_TlsRequirementType) Enum() *VirtualHost_TlsRequirementType { + p := new(VirtualHost_TlsRequirementType) + *p = x + return p +} + +func (x VirtualHost_TlsRequirementType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VirtualHost_TlsRequirementType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[0].Descriptor() +} + +func (VirtualHost_TlsRequirementType) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[0] +} + +func (x VirtualHost_TlsRequirementType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VirtualHost_TlsRequirementType.Descriptor instead. +func (VirtualHost_TlsRequirementType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0, 0} +} + +type RouteAction_ClusterNotFoundResponseCode int32 + +const ( + // HTTP status code - 503 Service Unavailable. + RouteAction_SERVICE_UNAVAILABLE RouteAction_ClusterNotFoundResponseCode = 0 + // HTTP status code - 404 Not Found. + RouteAction_NOT_FOUND RouteAction_ClusterNotFoundResponseCode = 1 + // HTTP status code - 500 Internal Server Error. + RouteAction_INTERNAL_SERVER_ERROR RouteAction_ClusterNotFoundResponseCode = 2 +) + +// Enum value maps for RouteAction_ClusterNotFoundResponseCode. +var ( + RouteAction_ClusterNotFoundResponseCode_name = map[int32]string{ + 0: "SERVICE_UNAVAILABLE", + 1: "NOT_FOUND", + 2: "INTERNAL_SERVER_ERROR", + } + RouteAction_ClusterNotFoundResponseCode_value = map[string]int32{ + "SERVICE_UNAVAILABLE": 0, + "NOT_FOUND": 1, + "INTERNAL_SERVER_ERROR": 2, + } +) + +func (x RouteAction_ClusterNotFoundResponseCode) Enum() *RouteAction_ClusterNotFoundResponseCode { + p := new(RouteAction_ClusterNotFoundResponseCode) + *p = x + return p +} + +func (x RouteAction_ClusterNotFoundResponseCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteAction_ClusterNotFoundResponseCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[1].Descriptor() +} + +func (RouteAction_ClusterNotFoundResponseCode) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[1] +} + +func (x RouteAction_ClusterNotFoundResponseCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteAction_ClusterNotFoundResponseCode.Descriptor instead. +func (RouteAction_ClusterNotFoundResponseCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} +} + +// Configures :ref:`internal redirect ` behavior. +// [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] +// +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
+type RouteAction_InternalRedirectAction int32 + +const ( + RouteAction_PASS_THROUGH_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 0 + RouteAction_HANDLE_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 1 +) + +// Enum value maps for RouteAction_InternalRedirectAction. +var ( + RouteAction_InternalRedirectAction_name = map[int32]string{ + 0: "PASS_THROUGH_INTERNAL_REDIRECT", + 1: "HANDLE_INTERNAL_REDIRECT", + } + RouteAction_InternalRedirectAction_value = map[string]int32{ + "PASS_THROUGH_INTERNAL_REDIRECT": 0, + "HANDLE_INTERNAL_REDIRECT": 1, + } +) + +func (x RouteAction_InternalRedirectAction) Enum() *RouteAction_InternalRedirectAction { + p := new(RouteAction_InternalRedirectAction) + *p = x + return p +} + +func (x RouteAction_InternalRedirectAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteAction_InternalRedirectAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[2].Descriptor() +} + +func (RouteAction_InternalRedirectAction) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[2] +} + +func (x RouteAction_InternalRedirectAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteAction_InternalRedirectAction.Descriptor instead. +func (RouteAction_InternalRedirectAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} +} + +type RetryPolicy_ResetHeaderFormat int32 + +const ( + RetryPolicy_SECONDS RetryPolicy_ResetHeaderFormat = 0 + RetryPolicy_UNIX_TIMESTAMP RetryPolicy_ResetHeaderFormat = 1 +) + +// Enum value maps for RetryPolicy_ResetHeaderFormat. +var ( + RetryPolicy_ResetHeaderFormat_name = map[int32]string{ + 0: "SECONDS", + 1: "UNIX_TIMESTAMP", + } + RetryPolicy_ResetHeaderFormat_value = map[string]int32{ + "SECONDS": 0, + "UNIX_TIMESTAMP": 1, + } +) + +func (x RetryPolicy_ResetHeaderFormat) Enum() *RetryPolicy_ResetHeaderFormat { + p := new(RetryPolicy_ResetHeaderFormat) + *p = x + return p +} + +func (x RetryPolicy_ResetHeaderFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RetryPolicy_ResetHeaderFormat) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[3].Descriptor() +} + +func (RetryPolicy_ResetHeaderFormat) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[3] +} + +func (x RetryPolicy_ResetHeaderFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RetryPolicy_ResetHeaderFormat.Descriptor instead. +func (RetryPolicy_ResetHeaderFormat) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} +} + +type RedirectAction_RedirectResponseCode int32 + +const ( + // Moved Permanently HTTP Status Code - 301. + RedirectAction_MOVED_PERMANENTLY RedirectAction_RedirectResponseCode = 0 + // Found HTTP Status Code - 302. + RedirectAction_FOUND RedirectAction_RedirectResponseCode = 1 + // See Other HTTP Status Code - 303. + RedirectAction_SEE_OTHER RedirectAction_RedirectResponseCode = 2 + // Temporary Redirect HTTP Status Code - 307. + RedirectAction_TEMPORARY_REDIRECT RedirectAction_RedirectResponseCode = 3 + // Permanent Redirect HTTP Status Code - 308. 
+ RedirectAction_PERMANENT_REDIRECT RedirectAction_RedirectResponseCode = 4 +) + +// Enum value maps for RedirectAction_RedirectResponseCode. +var ( + RedirectAction_RedirectResponseCode_name = map[int32]string{ + 0: "MOVED_PERMANENTLY", + 1: "FOUND", + 2: "SEE_OTHER", + 3: "TEMPORARY_REDIRECT", + 4: "PERMANENT_REDIRECT", + } + RedirectAction_RedirectResponseCode_value = map[string]int32{ + "MOVED_PERMANENTLY": 0, + "FOUND": 1, + "SEE_OTHER": 2, + "TEMPORARY_REDIRECT": 3, + "PERMANENT_REDIRECT": 4, + } +) + +func (x RedirectAction_RedirectResponseCode) Enum() *RedirectAction_RedirectResponseCode { + p := new(RedirectAction_RedirectResponseCode) + *p = x + return p +} + +func (x RedirectAction_RedirectResponseCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RedirectAction_RedirectResponseCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[4].Descriptor() +} + +func (RedirectAction_RedirectResponseCode) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[4] +} + +func (x RedirectAction_RedirectResponseCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RedirectAction_RedirectResponseCode.Descriptor instead. +func (RedirectAction_RedirectResponseCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11, 0} +} + +type RateLimit_Action_MetaData_Source int32 + +const ( + // Query :ref:`dynamic metadata ` + RateLimit_Action_MetaData_DYNAMIC RateLimit_Action_MetaData_Source = 0 + // Query :ref:`route entry metadata ` + RateLimit_Action_MetaData_ROUTE_ENTRY RateLimit_Action_MetaData_Source = 1 +) + +// Enum value maps for RateLimit_Action_MetaData_Source. +var ( + RateLimit_Action_MetaData_Source_name = map[int32]string{ + 0: "DYNAMIC", + 1: "ROUTE_ENTRY", + } + RateLimit_Action_MetaData_Source_value = map[string]int32{ + "DYNAMIC": 0, + "ROUTE_ENTRY": 1, + } +) + +func (x RateLimit_Action_MetaData_Source) Enum() *RateLimit_Action_MetaData_Source { + p := new(RateLimit_Action_MetaData_Source) + *p = x + return p +} + +func (x RateLimit_Action_MetaData_Source) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimit_Action_MetaData_Source) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[5].Descriptor() +} + +func (RateLimit_Action_MetaData_Source) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[5] +} + +func (x RateLimit_Action_MetaData_Source) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimit_Action_MetaData_Source.Descriptor instead. +func (RateLimit_Action_MetaData_Source) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8, 0} +} + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. 
+// [#next-free-field: 25] +type VirtualHost struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: “www.foo.com“. + // 2. Suffix domain wildcards: “*.foo.com“ or “*-bar.foo.com“. + // 3. Prefix domain wildcards: “foo.*“ or “foo-*“. + // 4. Special wildcard “*“ matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + // + // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. + Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + // Only one of this and “matcher“ can be specified. + Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"` + // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] + // The match tree to use when resolving route actions for incoming requests. Only one of this and “routes“ + // can be specified. + Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + RequireTls VirtualHost_TlsRequirementType `protobuf:"varint,4,opt,name=require_tls,json=requireTls,proto3,enum=envoy.config.route.v3.VirtualHost_TlsRequirementType" json:"require_tls,omitempty"` + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + VirtualClusters []*VirtualCluster `protobuf:"bytes,5,rep,name=virtual_clusters,json=virtualClusters,proto3" json:"virtual_clusters,omitempty"` + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + RateLimits []*RateLimit `protobuf:"bytes,6,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,7,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. 
+ RequestHeadersToRemove []string `protobuf:"bytes,13,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,10,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is + // found in the + // :ref:`VirtualHost.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`VirtualHost.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + Cors *CorsPolicy `protobuf:"bytes,8,opt,name=cors,proto3" json:"cors,omitempty"` + // This field can be used to provide virtual host level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + // + // [#next-major-version: rename to include_attempt_count_in_request.] + IncludeRequestAttemptCount bool `protobuf:"varint,14,opt,name=include_request_attempt_count,json=includeRequestAttemptCount,proto3" json:"include_request_attempt_count,omitempty"` + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the downstream response. Setting this option will cause the router to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the downstream + // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. 
+ IncludeAttemptCountInResponse bool `protobuf:"varint,19,opt,name=include_attempt_count_in_response,json=includeAttemptCountInResponse,proto3" json:"include_attempt_count_in_response,omitempty"` + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + RetryPolicy *RetryPolicy `protobuf:"bytes,16,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that setting a route level entry + // will take precedence over this config and it'll be treated independently (e.g.: values are not + // inherited). :ref:`Retry policy ` should not be + // set if this field is used. + RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy *HedgePolicy `protobuf:"bytes,17,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"` + // Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + // request header in retries initiated by per try timeouts. + IncludeIsTimeoutRetryHeader bool `protobuf:"varint,23,opt,name=include_is_timeout_retry_header,json=includeIsTimeoutRetryHeader,proto3" json:"include_is_timeout_retry_header,omitempty"` + // The maximum bytes which will be buffered for retries and shadowing. + // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum + // value of this and the listener per_connection_buffer_limit_bytes. + PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` + // Specify a set of default request mirroring policies for every route under this virtual host. + // It takes precedence over the route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,22,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // The metadata field can be used to provide additional information + // about the virtual host. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. 
+ Metadata *v31.Metadata `protobuf:"bytes,24,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *VirtualHost) Reset() { + *x = VirtualHost{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualHost) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualHost) ProtoMessage() {} + +func (x *VirtualHost) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualHost.ProtoReflect.Descriptor instead. +func (*VirtualHost) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0} +} + +func (x *VirtualHost) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *VirtualHost) GetDomains() []string { + if x != nil { + return x.Domains + } + return nil +} + +func (x *VirtualHost) GetRoutes() []*Route { + if x != nil { + return x.Routes + } + return nil +} + +func (x *VirtualHost) GetMatcher() *v3.Matcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *VirtualHost) GetRequireTls() VirtualHost_TlsRequirementType { + if x != nil { + return x.RequireTls + } + return VirtualHost_NONE +} + +func (x *VirtualHost) GetVirtualClusters() []*VirtualCluster { + if x != nil { + return x.VirtualClusters + } + return nil +} + +func (x *VirtualHost) GetRateLimits() []*RateLimit { + if x != nil { + return x.RateLimits + } + return nil +} + +func (x *VirtualHost) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *VirtualHost) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *VirtualHost) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *VirtualHost) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
+func (x *VirtualHost) GetCors() *CorsPolicy { + if x != nil { + return x.Cors + } + return nil +} + +func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *VirtualHost) GetIncludeRequestAttemptCount() bool { + if x != nil { + return x.IncludeRequestAttemptCount + } + return false +} + +func (x *VirtualHost) GetIncludeAttemptCountInResponse() bool { + if x != nil { + return x.IncludeAttemptCountInResponse + } + return false +} + +func (x *VirtualHost) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *VirtualHost) GetRetryPolicyTypedConfig() *anypb.Any { + if x != nil { + return x.RetryPolicyTypedConfig + } + return nil +} + +func (x *VirtualHost) GetHedgePolicy() *HedgePolicy { + if x != nil { + return x.HedgePolicy + } + return nil +} + +func (x *VirtualHost) GetIncludeIsTimeoutRetryHeader() bool { + if x != nil { + return x.IncludeIsTimeoutRetryHeader + } + return false +} + +func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerRequestBufferLimitBytes + } + return nil +} + +func (x *VirtualHost) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *VirtualHost) GetMetadata() *v31.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// A filter-defined action type. +type FilterAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Action *anypb.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` +} + +func (x *FilterAction) Reset() { + *x = FilterAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterAction) ProtoMessage() {} + +func (x *FilterAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterAction.ProtoReflect.Descriptor instead. +func (*FilterAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{1} +} + +func (x *FilterAction) GetAction() *anypb.Any { + if x != nil { + return x.Action + } + return nil +} + +// This can be used in route matcher :ref:`VirtualHost.matcher `. +// When the matcher matches, routes will be matched and run. +type RouteList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of routes that will be matched and run, in order. The first route that matches will be used. 
+ Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"` +} + +func (x *RouteList) Reset() { + *x = RouteList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteList) ProtoMessage() {} + +func (x *RouteList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteList.ProtoReflect.Descriptor instead. +func (*RouteList) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{2} +} + +func (x *RouteList) GetRoutes() []*Route { + if x != nil { + return x.Routes + } + return nil +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// +// [#next-free-field: 20] +type Route struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name for the route. + Name string `protobuf:"bytes,14,opt,name=name,proto3" json:"name,omitempty"` + // Route matching parameters. + Match *RouteMatch `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // Types that are assignable to Action: + // + // *Route_Route + // *Route_Redirect + // *Route_DirectResponse + // *Route_FilterAction + // *Route_NonForwardingAction + Action isRoute_Action `protobuf_oneof:"action"` + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. + Metadata *v31.Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Decorator for the matched route. + Decorator *Decorator `protobuf:"bytes,5,opt,name=decorator,proto3" json:"decorator,omitempty"` + // This field can be used to provide route specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,9,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + RequestHeadersToRemove []string `protobuf:"bytes,12,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,10,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing *Tracing `protobuf:"bytes,15,opt,name=tracing,proto3" json:"tracing,omitempty"` + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` + // The human readable prefix to use when emitting statistics for this endpoint. + // The statistics are rooted at vhost..route.. + // This should be set for highly critical + // endpoints that one wishes to get “per-route” statistics on. + // If not set, endpoint statistics are not generated. + // + // The emitted statistics are the same as those documented for :ref:`virtual clusters `. + // + // .. warning:: + // + // We do not recommend setting up a stat prefix for + // every application endpoint. This is both not easily maintainable and + // statistics use a non-trivial amount of memory(approximately 1KiB per route). 
+ StatPrefix string `protobuf:"bytes,19,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` +} + +func (x *Route) Reset() { + *x = Route{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Route) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Route) ProtoMessage() {} + +func (x *Route) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Route.ProtoReflect.Descriptor instead. +func (*Route) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{3} +} + +func (x *Route) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Route) GetMatch() *RouteMatch { + if x != nil { + return x.Match + } + return nil +} + +func (m *Route) GetAction() isRoute_Action { + if m != nil { + return m.Action + } + return nil +} + +func (x *Route) GetRoute() *RouteAction { + if x, ok := x.GetAction().(*Route_Route); ok { + return x.Route + } + return nil +} + +func (x *Route) GetRedirect() *RedirectAction { + if x, ok := x.GetAction().(*Route_Redirect); ok { + return x.Redirect + } + return nil +} + +func (x *Route) GetDirectResponse() *DirectResponseAction { + if x, ok := x.GetAction().(*Route_DirectResponse); ok { + return x.DirectResponse + } + return nil +} + +func (x *Route) GetFilterAction() *FilterAction { + if x, ok := x.GetAction().(*Route_FilterAction); ok { + return x.FilterAction + } + return nil +} + +func (x *Route) GetNonForwardingAction() *NonForwardingAction { + if x, ok := x.GetAction().(*Route_NonForwardingAction); ok { + return x.NonForwardingAction + } + return nil +} + +func (x *Route) GetMetadata() *v31.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Route) GetDecorator() *Decorator { + if x != nil { + return x.Decorator + } + return nil +} + +func (x *Route) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *Route) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *Route) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *Route) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *Route) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *Route) GetTracing() *Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *Route) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerRequestBufferLimitBytes + } + return nil +} + +func (x *Route) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +type isRoute_Action interface { + isRoute_Action() +} + +type Route_Route struct { + // Route request to some upstream cluster. + Route *RouteAction `protobuf:"bytes,2,opt,name=route,proto3,oneof"` +} + +type Route_Redirect struct { + // Return a redirect. 
+ Redirect *RedirectAction `protobuf:"bytes,3,opt,name=redirect,proto3,oneof"` +} + +type Route_DirectResponse struct { + // Return an arbitrary HTTP response directly, without proxying. + DirectResponse *DirectResponseAction `protobuf:"bytes,7,opt,name=direct_response,json=directResponse,proto3,oneof"` +} + +type Route_FilterAction struct { + // [#not-implemented-hide:] + // A filter-defined action (e.g., it could dynamically generate the RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] + FilterAction *FilterAction `protobuf:"bytes,17,opt,name=filter_action,json=filterAction,proto3,oneof"` +} + +type Route_NonForwardingAction struct { + // [#not-implemented-hide:] + // An action used when the route will generate a response directly, + // without forwarding to an upstream host. This will be used in non-proxy + // xDS clients like the gRPC server. It could also be used in the future + // in Envoy for a filter that directly generates responses for requests. + NonForwardingAction *NonForwardingAction `protobuf:"bytes,18,opt,name=non_forwarding_action,json=nonForwardingAction,proto3,oneof"` +} + +func (*Route_Route) isRoute_Action() {} + +func (*Route_Redirect) isRoute_Action() {} + +func (*Route_DirectResponse) isRoute_Action() {} + +func (*Route_FilterAction) isRoute_Action() {} + +func (*Route_NonForwardingAction) isRoute_Action() {} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. +type WeightedCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies one or more upstream clusters associated with the route. + Clusters []*WeightedCluster_ClusterWeight `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, if this is greater than 0. + // This field is now deprecated, and the client will use the sum of all + // cluster weights. It is up to the management server to supply the correct weights. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + TotalWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=total_weight,json=totalWeight,proto3" json:"total_weight,omitempty"` + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the “runtime_key_prefix“ is + // specified, the router will look for weights associated with each upstream + // cluster under the key “runtime_key_prefix“ + “.“ + “cluster[i].name“ where + // “cluster[i]“ denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. 
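// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file above: how the
// VirtualHost, Route, RouteMatch, and RouteAction messages fit together when
// built from Go. The Route.Action and RouteMatch.PathSpecifier oneofs are
// populated through their wrapper types (Route_Route, RouteMatch_Prefix). The
// package name, the routev3 import alias (pointing at the upstream
// go-control-plane module this file is generated from), and the cluster name
// "backend" are assumptions for the example only.
package routesketch

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
)

// ExampleVirtualHost returns a virtual host that matches every authority and
// forwards any path to the cluster named "backend".
func ExampleVirtualHost() *routev3.VirtualHost {
	return &routev3.VirtualHost{
		Name:    "example",
		Domains: []string{"*"},
		Routes: []*routev3.Route{{
			Name: "default",
			// PathSpecifier oneof: prefix match on the ":path" header.
			Match: &routev3.RouteMatch{
				PathSpecifier: &routev3.RouteMatch_Prefix{Prefix: "/"},
			},
			// Action oneof: forward to an upstream cluster.
			Action: &routev3.Route_Route{
				Route: &routev3.RouteAction{
					ClusterSpecifier: &routev3.RouteAction_Cluster{Cluster: "backend"},
				},
			},
		}},
	}
}
// ---------------------------------------------------------------------------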
+ RuntimeKeyPrefix string `protobuf:"bytes,2,opt,name=runtime_key_prefix,json=runtimeKeyPrefix,proto3" json:"runtime_key_prefix,omitempty"` + // Types that are assignable to RandomValueSpecifier: + // + // *WeightedCluster_HeaderName + RandomValueSpecifier isWeightedCluster_RandomValueSpecifier `protobuf_oneof:"random_value_specifier"` +} + +func (x *WeightedCluster) Reset() { + *x = WeightedCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WeightedCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WeightedCluster) ProtoMessage() {} + +func (x *WeightedCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WeightedCluster.ProtoReflect.Descriptor instead. +func (*WeightedCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4} +} + +func (x *WeightedCluster) GetClusters() []*WeightedCluster_ClusterWeight { + if x != nil { + return x.Clusters + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *WeightedCluster) GetTotalWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.TotalWeight + } + return nil +} + +func (x *WeightedCluster) GetRuntimeKeyPrefix() string { + if x != nil { + return x.RuntimeKeyPrefix + } + return "" +} + +func (m *WeightedCluster) GetRandomValueSpecifier() isWeightedCluster_RandomValueSpecifier { + if m != nil { + return m.RandomValueSpecifier + } + return nil +} + +func (x *WeightedCluster) GetHeaderName() string { + if x, ok := x.GetRandomValueSpecifier().(*WeightedCluster_HeaderName); ok { + return x.HeaderName + } + return "" +} + +type isWeightedCluster_RandomValueSpecifier interface { + isWeightedCluster_RandomValueSpecifier() +} + +type WeightedCluster_HeaderName struct { + // Specifies the header name that is used to look up the random value passed in the request header. + // This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic. + // If header is not present or invalid, Envoy will fall back to use the internally generated random value. + // This header is expected to be single-valued header as we only want to have one selected value throughout + // the process for the consistency. And the value is a unsigned number between 0 and UINT64_MAX. + HeaderName string `protobuf:"bytes,4,opt,name=header_name,json=headerName,proto3,oneof"` +} + +func (*WeightedCluster_HeaderName) isWeightedCluster_RandomValueSpecifier() {} + +// Configuration for a cluster specifier plugin. +type ClusterSpecifierPlugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the plugin and its opaque configuration. + Extension *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=extension,proto3" json:"extension,omitempty"` + // If is_optional is not set or is set to false and the plugin defined by this message is not a + // supported type, the containing resource is NACKed. 
If is_optional is set to true, the resource + // would not be NACKed for this reason. In this case, routes referencing this plugin's name would + // not be treated as an illegal configuration, but would result in a failure if the route is + // selected. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` +} + +func (x *ClusterSpecifierPlugin) Reset() { + *x = ClusterSpecifierPlugin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterSpecifierPlugin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterSpecifierPlugin) ProtoMessage() {} + +func (x *ClusterSpecifierPlugin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterSpecifierPlugin.ProtoReflect.Descriptor instead. +func (*ClusterSpecifierPlugin) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5} +} + +func (x *ClusterSpecifierPlugin) GetExtension() *v31.TypedExtensionConfig { + if x != nil { + return x.Extension + } + return nil +} + +func (x *ClusterSpecifierPlugin) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +// [#next-free-field: 16] +type RouteMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PathSpecifier: + // + // *RouteMatch_Prefix + // *RouteMatch_Path + // *RouteMatch_SafeRegex + // *RouteMatch_ConnectMatcher_ + // *RouteMatch_PathSeparatedPrefix + // *RouteMatch_PathMatchPolicy + PathSpecifier isRouteMatch_PathSpecifier `protobuf_oneof:"path_specifier"` + // Indicates that prefix/path matching should be case sensitive. The default + // is true. Ignored for safe_regex matching. + CaseSensitive *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=case_sensitive,json=caseSensitive,proto3" json:"case_sensitive,omitempty"` + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. 
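// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: a RouteAction whose
// ClusterSpecifier oneof selects WeightedClusters for an 80/20 traffic split.
// Because total_weight is deprecated, only the per-cluster weights are set and
// the client derives the total from their sum. The WeightedCluster_ClusterWeight
// fields (Name, Weight) are defined later in this file; the package name,
// import alias, and cluster names are assumptions for the example only.
package routesketch

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// ExampleWeightedSplit sends roughly 80% of requests to backend-v1 and 20% to
// backend-v2.
func ExampleWeightedSplit() *routev3.RouteAction {
	return &routev3.RouteAction{
		ClusterSpecifier: &routev3.RouteAction_WeightedClusters{
			WeightedClusters: &routev3.WeightedCluster{
				Clusters: []*routev3.WeightedCluster_ClusterWeight{
					{Name: "backend-v1", Weight: wrapperspb.UInt32(80)},
					{Name: "backend-v2", Weight: wrapperspb.UInt32(20)},
				},
			},
		},
	}
}
// ---------------------------------------------------------------------------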
+ RuntimeFraction *v31.RuntimeFractionalPercent `protobuf:"bytes,9,opt,name=runtime_fraction,json=runtimeFraction,proto3" json:"runtime_fraction,omitempty"` + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + Headers []*HeaderMatcher `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"` + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the “path“ header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the “path“ header's + // query string for a match to occur. In the event query parameters are + // repeated, only the first value for each key will be considered. + // + // .. note:: + // + // If query parameters are used to pass request message fields when + // `grpc_json_transcoder `_ + // is used, the transcoded message fields maybe different. The query parameters are + // url encoded, but the message fields are not. For example, if a query + // parameter is "foo%20bar", the message field will be "foo bar". + QueryParameters []*QueryParameterMatcher `protobuf:"bytes,7,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + Grpc *RouteMatch_GrpcRouteMatchOptions `protobuf:"bytes,8,opt,name=grpc,proto3" json:"grpc,omitempty"` + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContext *RouteMatch_TlsContextMatchOptions `protobuf:"bytes,11,opt,name=tls_context,json=tlsContext,proto3" json:"tls_context,omitempty"` + // Specifies a set of dynamic metadata matchers on which the route should match. + // The router will check the dynamic metadata against all the specified dynamic metadata matchers. + // If the number of specified dynamic metadata matchers is nonzero, they all must match the + // dynamic metadata for a match to occur. + DynamicMetadata []*v32.MetadataMatcher `protobuf:"bytes,13,rep,name=dynamic_metadata,json=dynamicMetadata,proto3" json:"dynamic_metadata,omitempty"` +} + +func (x *RouteMatch) Reset() { + *x = RouteMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch) ProtoMessage() {} + +func (x *RouteMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch.ProtoReflect.Descriptor instead. 
+func (*RouteMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6} +} + +func (m *RouteMatch) GetPathSpecifier() isRouteMatch_PathSpecifier { + if m != nil { + return m.PathSpecifier + } + return nil +} + +func (x *RouteMatch) GetPrefix() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *RouteMatch) GetPath() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_Path); ok { + return x.Path + } + return "" +} + +func (x *RouteMatch) GetSafeRegex() *v32.RegexMatcher { + if x, ok := x.GetPathSpecifier().(*RouteMatch_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *RouteMatch) GetConnectMatcher() *RouteMatch_ConnectMatcher { + if x, ok := x.GetPathSpecifier().(*RouteMatch_ConnectMatcher_); ok { + return x.ConnectMatcher + } + return nil +} + +func (x *RouteMatch) GetPathSeparatedPrefix() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_PathSeparatedPrefix); ok { + return x.PathSeparatedPrefix + } + return "" +} + +func (x *RouteMatch) GetPathMatchPolicy() *v31.TypedExtensionConfig { + if x, ok := x.GetPathSpecifier().(*RouteMatch_PathMatchPolicy); ok { + return x.PathMatchPolicy + } + return nil +} + +func (x *RouteMatch) GetCaseSensitive() *wrapperspb.BoolValue { + if x != nil { + return x.CaseSensitive + } + return nil +} + +func (x *RouteMatch) GetRuntimeFraction() *v31.RuntimeFractionalPercent { + if x != nil { + return x.RuntimeFraction + } + return nil +} + +func (x *RouteMatch) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *RouteMatch) GetQueryParameters() []*QueryParameterMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +func (x *RouteMatch) GetGrpc() *RouteMatch_GrpcRouteMatchOptions { + if x != nil { + return x.Grpc + } + return nil +} + +func (x *RouteMatch) GetTlsContext() *RouteMatch_TlsContextMatchOptions { + if x != nil { + return x.TlsContext + } + return nil +} + +func (x *RouteMatch) GetDynamicMetadata() []*v32.MetadataMatcher { + if x != nil { + return x.DynamicMetadata + } + return nil +} + +type isRouteMatch_PathSpecifier interface { + isRouteMatch_PathSpecifier() +} + +type RouteMatch_Prefix struct { + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the “:path“ header. + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3,oneof"` +} + +type RouteMatch_Path struct { + // If specified, the route is an exact path rule meaning that the path must + // exactly match the “:path“ header once the query string is removed. + Path string `protobuf:"bytes,2,opt,name=path,proto3,oneof"` +} + +type RouteMatch_SafeRegex struct { + // If specified, the route is a regular expression rule meaning that the + // regex must match the “:path“ header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the “:path“ header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. 
We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + SafeRegex *v32.RegexMatcher `protobuf:"bytes,10,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type RouteMatch_ConnectMatcher_ struct { + // If this is used as the matcher, the matcher will only match CONNECT or CONNECT-UDP requests. + // Note that this will not match other Extended CONNECT requests (WebSocket and the like) as + // they are normalized in Envoy as HTTP/1.1 style upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2 and HTTP/3, + // where Extended CONNECT requests may have a path, the path matchers will work if + // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectMatcher *RouteMatch_ConnectMatcher `protobuf:"bytes,12,opt,name=connect_matcher,json=connectMatcher,proto3,oneof"` +} + +type RouteMatch_PathSeparatedPrefix struct { + // If specified, the route is a path-separated prefix rule meaning that the + // “:path“ header (without the query string) must either exactly match the + // “path_separated_prefix“ or have it as a prefix, followed by “/“ + // + // For example, “/api/dev“ would match + // “/api/dev“, “/api/dev/“, “/api/dev/v1“, and “/api/dev?param=true“ + // but would not match “/api/developer“ + // + // Expect the value to not contain “?“ or “#“ and not to end in “/“ + PathSeparatedPrefix string `protobuf:"bytes,14,opt,name=path_separated_prefix,json=pathSeparatedPrefix,proto3,oneof"` +} + +type RouteMatch_PathMatchPolicy struct { + // [#extension-category: envoy.path.match] + PathMatchPolicy *v31.TypedExtensionConfig `protobuf:"bytes,15,opt,name=path_match_policy,json=pathMatchPolicy,proto3,oneof"` +} + +func (*RouteMatch_Prefix) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_Path) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_SafeRegex) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_ConnectMatcher_) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_PathSeparatedPrefix) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_PathMatchPolicy) isRouteMatch_PathSpecifier() {} + +// Cors policy configuration. +// +// .. attention:: +// +// This message has been deprecated. Please use +// :ref:`CorsPolicy in filter extension ` +// as as alternative. +// +// [#next-free-field: 14] +type CorsPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + AllowOriginStringMatch []*v32.StringMatcher `protobuf:"bytes,11,rep,name=allow_origin_string_match,json=allowOriginStringMatch,proto3" json:"allow_origin_string_match,omitempty"` + // Specifies the content for the “access-control-allow-methods“ header. + AllowMethods string `protobuf:"bytes,2,opt,name=allow_methods,json=allowMethods,proto3" json:"allow_methods,omitempty"` + // Specifies the content for the “access-control-allow-headers“ header. + AllowHeaders string `protobuf:"bytes,3,opt,name=allow_headers,json=allowHeaders,proto3" json:"allow_headers,omitempty"` + // Specifies the content for the “access-control-expose-headers“ header. 
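// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: a RouteMatch using the
// path_separated_prefix specifier described above, combined with the
// case_sensitive wrapper. "/api/dev" matches "/api/dev", "/api/dev/v1", and
// "/api/dev?param=true" but not "/api/developer". The package name and import
// alias are assumptions for the example only.
package routesketch

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// ExamplePathSeparatedMatch matches "/api/dev" and any path under it,
// case-insensitively.
func ExamplePathSeparatedMatch() *routev3.RouteMatch {
	return &routev3.RouteMatch{
		PathSpecifier: &routev3.RouteMatch_PathSeparatedPrefix{
			PathSeparatedPrefix: "/api/dev",
		},
		// Defaults to true; ignored for safe_regex matching.
		CaseSensitive: wrapperspb.Bool(false),
	}
}
// ---------------------------------------------------------------------------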
+ ExposeHeaders string `protobuf:"bytes,4,opt,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` + // Specifies the content for the “access-control-max-age“ header. + MaxAge string `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // Specifies whether the resource allows credentials. + AllowCredentials *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` + // Types that are assignable to EnabledSpecifier: + // + // *CorsPolicy_FilterEnabled + EnabledSpecifier isCorsPolicy_EnabledSpecifier `protobuf_oneof:"enabled_specifier"` + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when “filter_enabled“ and “enabled“ are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's “Origin“ to determine if it's valid but will not enforce any policies. + ShadowEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,10,opt,name=shadow_enabled,json=shadowEnabled,proto3" json:"shadow_enabled,omitempty"` + // Specify whether allow requests whose target server's IP address is more private than that from + // which the request initiator was fetched. + // + // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. + AllowPrivateNetworkAccess *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=allow_private_network_access,json=allowPrivateNetworkAccess,proto3" json:"allow_private_network_access,omitempty"` + // Specifies if preflight requests not matching the configured allowed origin should be forwarded + // to the upstream. Default is true. + ForwardNotMatchingPreflights *wrapperspb.BoolValue `protobuf:"bytes,13,opt,name=forward_not_matching_preflights,json=forwardNotMatchingPreflights,proto3" json:"forward_not_matching_preflights,omitempty"` +} + +func (x *CorsPolicy) Reset() { + *x = CorsPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CorsPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CorsPolicy) ProtoMessage() {} + +func (x *CorsPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CorsPolicy.ProtoReflect.Descriptor instead. 
+func (*CorsPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7} +} + +func (x *CorsPolicy) GetAllowOriginStringMatch() []*v32.StringMatcher { + if x != nil { + return x.AllowOriginStringMatch + } + return nil +} + +func (x *CorsPolicy) GetAllowMethods() string { + if x != nil { + return x.AllowMethods + } + return "" +} + +func (x *CorsPolicy) GetAllowHeaders() string { + if x != nil { + return x.AllowHeaders + } + return "" +} + +func (x *CorsPolicy) GetExposeHeaders() string { + if x != nil { + return x.ExposeHeaders + } + return "" +} + +func (x *CorsPolicy) GetMaxAge() string { + if x != nil { + return x.MaxAge + } + return "" +} + +func (x *CorsPolicy) GetAllowCredentials() *wrapperspb.BoolValue { + if x != nil { + return x.AllowCredentials + } + return nil +} + +func (m *CorsPolicy) GetEnabledSpecifier() isCorsPolicy_EnabledSpecifier { + if m != nil { + return m.EnabledSpecifier + } + return nil +} + +func (x *CorsPolicy) GetFilterEnabled() *v31.RuntimeFractionalPercent { + if x, ok := x.GetEnabledSpecifier().(*CorsPolicy_FilterEnabled); ok { + return x.FilterEnabled + } + return nil +} + +func (x *CorsPolicy) GetShadowEnabled() *v31.RuntimeFractionalPercent { + if x != nil { + return x.ShadowEnabled + } + return nil +} + +func (x *CorsPolicy) GetAllowPrivateNetworkAccess() *wrapperspb.BoolValue { + if x != nil { + return x.AllowPrivateNetworkAccess + } + return nil +} + +func (x *CorsPolicy) GetForwardNotMatchingPreflights() *wrapperspb.BoolValue { + if x != nil { + return x.ForwardNotMatchingPreflights + } + return nil +} + +type isCorsPolicy_EnabledSpecifier interface { + isCorsPolicy_EnabledSpecifier() +} + +type CorsPolicy_FilterEnabled struct { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither “enabled“, “filter_enabled“, nor “shadow_enabled“ are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + FilterEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,9,opt,name=filter_enabled,json=filterEnabled,proto3,oneof"` +} + +func (*CorsPolicy_FilterEnabled) isCorsPolicy_EnabledSpecifier() {} + +// [#next-free-field: 42] +type RouteAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ClusterSpecifier: + // + // *RouteAction_Cluster + // *RouteAction_ClusterHeader + // *RouteAction_WeightedClusters + // *RouteAction_ClusterSpecifierPlugin + // *RouteAction_InlineClusterSpecifierPlugin + ClusterSpecifier isRouteAction_ClusterSpecifier `protobuf_oneof:"cluster_specifier"` + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode RouteAction_ClusterNotFoundResponseCode `protobuf:"varint,20,opt,name=cluster_not_found_response_code,json=clusterNotFoundResponseCode,proto3,enum=envoy.config.route.v3.RouteAction_ClusterNotFoundResponseCode" json:"cluster_not_found_response_code,omitempty"` + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. 
The filter name should be specified as “envoy.lb“. + MetadataMatch *v31.Metadata `protobuf:"bytes,4,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite ` + // :ref:`path_rewrite_policy `, + // or :ref:`prefix_rewrite ` may be specified. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + // requests to ``/prefix/etc`` will be stripped to ``/etc``. + PrefixRewrite string `protobuf:"bytes,5,opt,name=prefix_rewrite,json=prefixRewrite,proto3" json:"prefix_rewrite,omitempty"` + // Indicates that during forwarding, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. The router filter will place the original path as it was + // before the rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite `, + // :ref:`prefix_rewrite `, or + // :ref:`path_rewrite_policy `] + // may be specified. + // + // Examples using Google's `RE2 `_ engine: + // + // - The path pattern “^/service/([^/]+)(/.*)$“ paired with a substitution + // string of “\2/instance/\1“ would transform “/service/foo/v1/api“ + // into “/v1/api/instance/foo“. + // + // - The pattern “one“ paired with a substitution string of “two“ would + // transform “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/two/zzz“. + // + // - The pattern “^(.*?)one(.*)$“ paired with a substitution string of + // “\1two\2“ would replace only the first occurrence of “one“, + // transforming path “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/one/zzz“. + // + // - The pattern “(?i)/xxx/“ paired with a substitution string of “/yyy/“ + // would do a case-insensitive match and transform path “/aaa/XxX/bbb“ to + // “/aaa/yyy/bbb“. 
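// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: the two-route
// prefix_rewrite pattern shown in the YAML above, expressed with these Go
// types. Requests to "/prefix/etc" are rewritten to "/etc" and requests to
// exactly "/prefix" are rewritten to "/". The package name, import alias, and
// the cluster name "backend" are assumptions for the example only.
package routesketch

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
)

// ExamplePrefixRewriteRoutes returns the two routes needed to strip the
// "/prefix" segment for both the bare and the trailing-slash forms.
func ExamplePrefixRewriteRoutes() []*routev3.Route {
	mkRoute := func(prefix, rewrite string) *routev3.Route {
		return &routev3.Route{
			Match: &routev3.RouteMatch{
				PathSpecifier: &routev3.RouteMatch_Prefix{Prefix: prefix},
			},
			Action: &routev3.Route_Route{Route: &routev3.RouteAction{
				ClusterSpecifier: &routev3.RouteAction_Cluster{Cluster: "backend"},
				PrefixRewrite:    rewrite,
			}},
		}
	}
	return []*routev3.Route{
		mkRoute("/prefix/", "/"), // "/prefix/etc" -> "/etc"
		mkRoute("/prefix", "/"),  // "/prefix"     -> "/"
	}
}
// ---------------------------------------------------------------------------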
+ RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,32,opt,name=regex_rewrite,json=regexRewrite,proto3" json:"regex_rewrite,omitempty"` + // [#extension-category: envoy.path.rewrite] + PathRewritePolicy *v31.TypedExtensionConfig `protobuf:"bytes,41,opt,name=path_rewrite_policy,json=pathRewritePolicy,proto3" json:"path_rewrite_policy,omitempty"` + // Types that are assignable to HostRewriteSpecifier: + // + // *RouteAction_HostRewriteLiteral + // *RouteAction_AutoHostRewrite + // *RouteAction_HostRewriteHeader + // *RouteAction_HostRewritePathRegex + HostRewriteSpecifier isRouteAction_HostRewriteSpecifier `protobuf_oneof:"host_rewrite_specifier"` + // If set, then a host rewrite action (one of + // :ref:`host_rewrite_literal `, + // :ref:`auto_host_rewrite `, + // :ref:`host_rewrite_header `, or + // :ref:`host_rewrite_path_regex `) + // causes the original value of the host header, if any, to be appended to the + // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. + AppendXForwardedHost bool `protobuf:"varint,38,opt,name=append_x_forwarded_host,json=appendXForwardedHost,proto3" json:"append_x_forwarded_host,omitempty"` + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled according to the value for + // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + IdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // Specifies how to send request over TLS early data. + // If absent, allows `safe HTTP requests `_ to be sent on early data. 
+ // [#extension-category: envoy.route.early_data_policy] + EarlyDataPolicy *v31.TypedExtensionConfig `protobuf:"bytes,40,opt,name=early_data_policy,json=earlyDataPolicy,proto3" json:"early_data_policy,omitempty"` + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + RetryPolicy *RetryPolicy `protobuf:"bytes,9,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that if this is set, it'll take + // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + // most internal one becomes the enforced policy). :ref:`Retry policy ` + // should not be set if this field is used. + RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + // Specify a set of route request mirroring policies. + // It takes precedence over the virtual host and route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,30,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // Optionally specifies the :ref:`routing priority `. + Priority v31.RoutingPriority `protobuf:"varint,11,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"` + // Specifies a set of rate limit configurations that could be applied to the + // route. + RateLimits []*RateLimit `protobuf:"bytes,13,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + // + // This field is deprecated. Please use :ref:`vh_rate_limits ` + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + IncludeVhRateLimits *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=include_vh_rate_limits,json=includeVhRateLimits,proto3" json:"include_vh_rate_limits,omitempty"` + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. 
+ HashPolicy []*RouteAction_HashPolicy `protobuf:"bytes,15,rep,name=hash_policy,json=hashPolicy,proto3" json:"hash_policy,omitempty"` + // Indicates that the route has a CORS policy. This field is ignored if related cors policy is + // found in the :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + Cors *CorsPolicy `protobuf:"bytes,17,opt,name=cors,proto3" json:"cors,omitempty"` + // Deprecated by :ref:`grpc_timeout_header_max ` + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the “grpc-timeout“ header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + // + // .. note:: + // + // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + // precedence over `grpc-timeout header `_, when + // both are present. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + MaxGrpcTimeout *durationpb.Duration `protobuf:"bytes,23,opt,name=max_grpc_timeout,json=maxGrpcTimeout,proto3" json:"max_grpc_timeout,omitempty"` + // Deprecated by :ref:`grpc_timeout_header_offset `. + // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + GrpcTimeoutOffset *durationpb.Duration `protobuf:"bytes,28,opt,name=grpc_timeout_offset,json=grpcTimeoutOffset,proto3" json:"grpc_timeout_offset,omitempty"` + UpgradeConfigs []*RouteAction_UpgradeConfig `protobuf:"bytes,25,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. 
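// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: setting the per-route
// upstream timeout and idle timeout described earlier in this message. timeout
// bounds the whole upstream response (including retries); idle_timeout bounds
// how long the request's stream may sit idle, and a zero value disables it
// even when the connection manager configures a stream idle timeout. The
// package name, import alias, cluster name, and the chosen durations are
// assumptions for the example only.
package routesketch

import (
	"time"

	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

// ExampleTimeouts gives the route a 15s end-to-end upstream timeout and a 60s
// idle timeout.
func ExampleTimeouts() *routev3.RouteAction {
	return &routev3.RouteAction{
		ClusterSpecifier: &routev3.RouteAction_Cluster{Cluster: "backend"},
		Timeout:          durationpb.New(15 * time.Second),
		IdleTimeout:      durationpb.New(60 * time.Second),
	}
}
// ---------------------------------------------------------------------------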
+ InternalRedirectPolicy *InternalRedirectPolicy `protobuf:"bytes,34,opt,name=internal_redirect_policy,json=internalRedirectPolicy,proto3" json:"internal_redirect_policy,omitempty"` + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + InternalRedirectAction RouteAction_InternalRedirectAction `protobuf:"varint,26,opt,name=internal_redirect_action,json=internalRedirectAction,proto3,enum=envoy.config.route.v3.RouteAction_InternalRedirectAction" json:"internal_redirect_action,omitempty"` + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,31,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy *HedgePolicy `protobuf:"bytes,27,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"` + // Specifies the maximum stream duration for this route. + MaxStreamDuration *RouteAction_MaxStreamDuration `protobuf:"bytes,36,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` +} + +func (x *RouteAction) Reset() { + *x = RouteAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction) ProtoMessage() {} + +func (x *RouteAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction.ProtoReflect.Descriptor instead. 
+func (*RouteAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8} +} + +func (m *RouteAction) GetClusterSpecifier() isRouteAction_ClusterSpecifier { + if m != nil { + return m.ClusterSpecifier + } + return nil +} + +func (x *RouteAction) GetCluster() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_Cluster); ok { + return x.Cluster + } + return "" +} + +func (x *RouteAction) GetClusterHeader() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_ClusterHeader); ok { + return x.ClusterHeader + } + return "" +} + +func (x *RouteAction) GetWeightedClusters() *WeightedCluster { + if x, ok := x.GetClusterSpecifier().(*RouteAction_WeightedClusters); ok { + return x.WeightedClusters + } + return nil +} + +func (x *RouteAction) GetClusterSpecifierPlugin() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_ClusterSpecifierPlugin); ok { + return x.ClusterSpecifierPlugin + } + return "" +} + +func (x *RouteAction) GetInlineClusterSpecifierPlugin() *ClusterSpecifierPlugin { + if x, ok := x.GetClusterSpecifier().(*RouteAction_InlineClusterSpecifierPlugin); ok { + return x.InlineClusterSpecifierPlugin + } + return nil +} + +func (x *RouteAction) GetClusterNotFoundResponseCode() RouteAction_ClusterNotFoundResponseCode { + if x != nil { + return x.ClusterNotFoundResponseCode + } + return RouteAction_SERVICE_UNAVAILABLE +} + +func (x *RouteAction) GetMetadataMatch() *v31.Metadata { + if x != nil { + return x.MetadataMatch + } + return nil +} + +func (x *RouteAction) GetPrefixRewrite() string { + if x != nil { + return x.PrefixRewrite + } + return "" +} + +func (x *RouteAction) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x != nil { + return x.RegexRewrite + } + return nil +} + +func (x *RouteAction) GetPathRewritePolicy() *v31.TypedExtensionConfig { + if x != nil { + return x.PathRewritePolicy + } + return nil +} + +func (m *RouteAction) GetHostRewriteSpecifier() isRouteAction_HostRewriteSpecifier { + if m != nil { + return m.HostRewriteSpecifier + } + return nil +} + +func (x *RouteAction) GetHostRewriteLiteral() string { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewriteLiteral); ok { + return x.HostRewriteLiteral + } + return "" +} + +func (x *RouteAction) GetAutoHostRewrite() *wrapperspb.BoolValue { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_AutoHostRewrite); ok { + return x.AutoHostRewrite + } + return nil +} + +func (x *RouteAction) GetHostRewriteHeader() string { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewriteHeader); ok { + return x.HostRewriteHeader + } + return "" +} + +func (x *RouteAction) GetHostRewritePathRegex() *v32.RegexMatchAndSubstitute { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewritePathRegex); ok { + return x.HostRewritePathRegex + } + return nil +} + +func (x *RouteAction) GetAppendXForwardedHost() bool { + if x != nil { + return x.AppendXForwardedHost + } + return false +} + +func (x *RouteAction) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *RouteAction) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *RouteAction) GetEarlyDataPolicy() *v31.TypedExtensionConfig { + if x != nil { + return x.EarlyDataPolicy + } + return nil +} + +func (x *RouteAction) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *RouteAction) GetRetryPolicyTypedConfig() 
*anypb.Any { + if x != nil { + return x.RetryPolicyTypedConfig + } + return nil +} + +func (x *RouteAction) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *RouteAction) GetPriority() v31.RoutingPriority { + if x != nil { + return x.Priority + } + return v31.RoutingPriority(0) +} + +func (x *RouteAction) GetRateLimits() []*RateLimit { + if x != nil { + return x.RateLimits + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetIncludeVhRateLimits() *wrapperspb.BoolValue { + if x != nil { + return x.IncludeVhRateLimits + } + return nil +} + +func (x *RouteAction) GetHashPolicy() []*RouteAction_HashPolicy { + if x != nil { + return x.HashPolicy + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetCors() *CorsPolicy { + if x != nil { + return x.Cors + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetMaxGrpcTimeout() *durationpb.Duration { + if x != nil { + return x.MaxGrpcTimeout + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetGrpcTimeoutOffset() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutOffset + } + return nil +} + +func (x *RouteAction) GetUpgradeConfigs() []*RouteAction_UpgradeConfig { + if x != nil { + return x.UpgradeConfigs + } + return nil +} + +func (x *RouteAction) GetInternalRedirectPolicy() *InternalRedirectPolicy { + if x != nil { + return x.InternalRedirectPolicy + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetInternalRedirectAction() RouteAction_InternalRedirectAction { + if x != nil { + return x.InternalRedirectAction + } + return RouteAction_PASS_THROUGH_INTERNAL_REDIRECT +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetMaxInternalRedirects() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInternalRedirects + } + return nil +} + +func (x *RouteAction) GetHedgePolicy() *HedgePolicy { + if x != nil { + return x.HedgePolicy + } + return nil +} + +func (x *RouteAction) GetMaxStreamDuration() *RouteAction_MaxStreamDuration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +type isRouteAction_ClusterSpecifier interface { + isRouteAction_ClusterSpecifier() +} + +type RouteAction_Cluster struct { + // Indicates the upstream cluster to which the request should be routed + // to. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3,oneof"` +} + +type RouteAction_ClusterHeader struct { + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
+ ClusterHeader string `protobuf:"bytes,2,opt,name=cluster_header,json=clusterHeader,proto3,oneof"` +} + +type RouteAction_WeightedClusters struct { + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedClusters *WeightedCluster `protobuf:"bytes,3,opt,name=weighted_clusters,json=weightedClusters,proto3,oneof"` +} + +type RouteAction_ClusterSpecifierPlugin struct { + // Name of the cluster specifier plugin to use to determine the cluster for requests on this route. + // The cluster specifier plugin name must be defined in the associated + // :ref:`cluster specifier plugins ` + // in the :ref:`name ` field. + ClusterSpecifierPlugin string `protobuf:"bytes,37,opt,name=cluster_specifier_plugin,json=clusterSpecifierPlugin,proto3,oneof"` +} + +type RouteAction_InlineClusterSpecifierPlugin struct { + // Custom cluster specifier plugin configuration to use to determine the cluster for requests + // on this route. + InlineClusterSpecifierPlugin *ClusterSpecifierPlugin `protobuf:"bytes,39,opt,name=inline_cluster_specifier_plugin,json=inlineClusterSpecifierPlugin,proto3,oneof"` +} + +func (*RouteAction_Cluster) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_ClusterHeader) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_WeightedClusters) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_ClusterSpecifierPlugin) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_InlineClusterSpecifierPlugin) isRouteAction_ClusterSpecifier() {} + +type isRouteAction_HostRewriteSpecifier interface { + isRouteAction_HostRewriteSpecifier() +} + +type RouteAction_HostRewriteLiteral struct { + // Indicates that during forwarding, the host header will be swapped with + // this value. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + HostRewriteLiteral string `protobuf:"bytes,6,opt,name=host_rewrite_literal,json=hostRewriteLiteral,proto3,oneof"` +} + +type RouteAction_AutoHostRewrite struct { + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type “strict_dns“ or “logical_dns“, + // or when :ref:`hostname ` + // field is not empty. Setting this to true with other cluster types + // has no effect. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + AutoHostRewrite *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=auto_host_rewrite,json=autoHostRewrite,proto3,oneof"` +} + +type RouteAction_HostRewriteHeader struct { + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
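+ //
+ // Editor's note, for illustration only (not generated output), using a
+ // hypothetical header name:
+ //
+ //	action := &RouteAction{
+ //		HostRewriteSpecifier: &RouteAction_HostRewriteHeader{HostRewriteHeader: "x-upstream-host"},
+ //	}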
+ HostRewriteHeader string `protobuf:"bytes,29,opt,name=host_rewrite_header,json=hostRewriteHeader,proto3,oneof"` +} + +type RouteAction_HostRewritePathRegex struct { + // Indicates that during forwarding, the host header will be swapped with + // the result of the regex substitution executed on path value with query and fragment removed. + // This is useful for transitioning variable content between path segment and subdomain. + // Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + // + // For example with the following config: + // + // .. code-block:: yaml + // + // host_rewrite_path_regex: + // pattern: + // google_re2: {} + // regex: "^/(.+)/.+$" + // substitution: \1 + // + // Would rewrite the host header to “envoyproxy.io“ given the path “/envoyproxy.io/some/path“. + HostRewritePathRegex *v32.RegexMatchAndSubstitute `protobuf:"bytes,35,opt,name=host_rewrite_path_regex,json=hostRewritePathRegex,proto3,oneof"` +} + +func (*RouteAction_HostRewriteLiteral) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_AutoHostRewrite) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_HostRewriteHeader) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_HostRewritePathRegex) isRouteAction_HostRewriteSpecifier() {} + +// HTTP retry :ref:`architecture overview `. +// [#next-free-field: 14] +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + RetryOn string `protobuf:"bytes,1,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + // parameter is optional. The same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + PerTryTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=per_try_timeout,json=perTryTimeout,proto3" json:"per_try_timeout,omitempty"` + // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + // parameter is optional and if absent there is no per try idle timeout. The semantics of the per + // try idle timeout are similar to the + // :ref:`route idle timeout ` and + // :ref:`stream idle timeout + // ` + // both enforced by the HTTP connection manager. The difference is that this idle timeout + // is enforced by the router for each individual attempt and thus after all previous filters have + // run, as opposed to *before* all previous filters run for the other idle timeouts. 
This timeout + // is useful in cases in which total request timeout is bounded by a number of retries and a + // :ref:`per_try_timeout `, but + // there is a desire to ensure each try is making incremental progress. Note also that similar + // to :ref:`per_try_timeout `, + // this idle timeout does not start until after both the entire request has been received by the + // router *and* a connection pool connection has been obtained. Unlike + // :ref:`per_try_timeout `, + // the idle timer continues once the response starts streaming back to the downstream client. + // This ensures that response data continues to make progress without using one of the HTTP + // connection manager idle timeouts. + PerTryIdleTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=per_try_idle_timeout,json=perTryIdleTimeout,proto3" json:"per_try_idle_timeout,omitempty"` + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority *RetryPolicy_RetryPriority `protobuf:"bytes,4,opt,name=retry_priority,json=retryPriority,proto3" json:"retry_priority,omitempty"` + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + RetryHostPredicate []*RetryPolicy_RetryHostPredicate `protobuf:"bytes,5,rep,name=retry_host_predicate,json=retryHostPredicate,proto3" json:"retry_host_predicate,omitempty"` + // Retry options predicates that will be applied prior to retrying a request. These predicates + // allow customizing request behavior between retries. + // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + RetryOptionsPredicates []*v31.TypedExtensionConfig `protobuf:"bytes,12,rep,name=retry_options_predicates,json=retryOptionsPredicates,proto3" json:"retry_options_predicates,omitempty"` + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + HostSelectionRetryMaxAttempts int64 `protobuf:"varint,6,opt,name=host_selection_retry_max_attempts,json=hostSelectionRetryMaxAttempts,proto3" json:"host_selection_retry_max_attempts,omitempty"` + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + RetriableStatusCodes []uint32 `protobuf:"varint,7,rep,packed,name=retriable_status_codes,json=retriableStatusCodes,proto3" json:"retriable_status_codes,omitempty"` + // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // “upstream.base_retry_backoff_ms“ runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff *RetryPolicy_RetryBackOff `protobuf:"bytes,8,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` + // Specifies parameters that control a retry back-off strategy that is used + // when the request is rate limited by the upstream server. 
The server may + // return a response header like “Retry-After“ or “X-RateLimit-Reset“ to + // provide feedback to the client on how long to wait before retrying. If + // configured, this back-off strategy will be used instead of the + // default exponential back off strategy (configured using “retry_back_off“) + // whenever a response includes the matching headers. + RateLimitedRetryBackOff *RetryPolicy_RateLimitedRetryBackOff `protobuf:"bytes,11,opt,name=rate_limited_retry_back_off,json=rateLimitedRetryBackOff,proto3" json:"rate_limited_retry_back_off,omitempty"` + // HTTP response headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + RetriableHeaders []*HeaderMatcher `protobuf:"bytes,9,rep,name=retriable_headers,json=retriableHeaders,proto3" json:"retriable_headers,omitempty"` + // HTTP headers which must be present in the request for retries to be attempted. + RetriableRequestHeaders []*HeaderMatcher `protobuf:"bytes,10,rep,name=retriable_request_headers,json=retriableRequestHeaders,proto3" json:"retriable_request_headers,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. 
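+
+// Editor's note: an illustrative, non-normative sketch of how the messages above
+// fit together. It builds a RouteAction that targets a single cluster, attaches a
+// RetryPolicy, and enables hedging on the per-try timeout. The cluster name and
+// the concrete timeout/retry values are assumptions made up for the example;
+// ``wrapperspb`` and ``durationpb`` are the packages already imported by this
+// file, ``time`` is the standard library.
+//
+//	action := &RouteAction{
+//		ClusterSpecifier: &RouteAction_Cluster{Cluster: "backend"},
+//		Timeout:          durationpb.New(15 * time.Second),
+//		RetryPolicy: &RetryPolicy{
+//			RetryOn:       "5xx",
+//			NumRetries:    wrapperspb.UInt32(3),
+//			PerTryTimeout: durationpb.New(2 * time.Second),
+//		},
+//		// hedge_on_per_try_timeout only takes effect together with a retry
+//		// policy that allows at least one retry (see HedgePolicy below).
+//		HedgePolicy: &HedgePolicy{HedgeOnPerTryTimeout: true},
+//	}
+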
+func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9} +} + +func (x *RetryPolicy) GetRetryOn() string { + if x != nil { + return x.RetryOn + } + return "" +} + +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.NumRetries + } + return nil +} + +func (x *RetryPolicy) GetPerTryTimeout() *durationpb.Duration { + if x != nil { + return x.PerTryTimeout + } + return nil +} + +func (x *RetryPolicy) GetPerTryIdleTimeout() *durationpb.Duration { + if x != nil { + return x.PerTryIdleTimeout + } + return nil +} + +func (x *RetryPolicy) GetRetryPriority() *RetryPolicy_RetryPriority { + if x != nil { + return x.RetryPriority + } + return nil +} + +func (x *RetryPolicy) GetRetryHostPredicate() []*RetryPolicy_RetryHostPredicate { + if x != nil { + return x.RetryHostPredicate + } + return nil +} + +func (x *RetryPolicy) GetRetryOptionsPredicates() []*v31.TypedExtensionConfig { + if x != nil { + return x.RetryOptionsPredicates + } + return nil +} + +func (x *RetryPolicy) GetHostSelectionRetryMaxAttempts() int64 { + if x != nil { + return x.HostSelectionRetryMaxAttempts + } + return 0 +} + +func (x *RetryPolicy) GetRetriableStatusCodes() []uint32 { + if x != nil { + return x.RetriableStatusCodes + } + return nil +} + +func (x *RetryPolicy) GetRetryBackOff() *RetryPolicy_RetryBackOff { + if x != nil { + return x.RetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetRateLimitedRetryBackOff() *RetryPolicy_RateLimitedRetryBackOff { + if x != nil { + return x.RateLimitedRetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetRetriableHeaders() []*HeaderMatcher { + if x != nil { + return x.RetriableHeaders + } + return nil +} + +func (x *RetryPolicy) GetRetriableRequestHeaders() []*HeaderMatcher { + if x != nil { + return x.RetriableRequestHeaders + } + return nil +} + +// HTTP request hedging :ref:`architecture overview `. +type HedgePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + InitialRequests *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=initial_requests,json=initialRequests,proto3" json:"initial_requests,omitempty"` + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + AdditionalRequestChance *v33.FractionalPercent `protobuf:"bytes,2,opt,name=additional_request_chance,json=additionalRequestChance,proto3" json:"additional_request_chance,omitempty"` + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // - At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // - Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client + // if there are no more retries left. + // - After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. 
+ // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // + // Defaults to false. + HedgeOnPerTryTimeout bool `protobuf:"varint,3,opt,name=hedge_on_per_try_timeout,json=hedgeOnPerTryTimeout,proto3" json:"hedge_on_per_try_timeout,omitempty"` +} + +func (x *HedgePolicy) Reset() { + *x = HedgePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HedgePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HedgePolicy) ProtoMessage() {} + +func (x *HedgePolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HedgePolicy.ProtoReflect.Descriptor instead. +func (*HedgePolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10} +} + +func (x *HedgePolicy) GetInitialRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialRequests + } + return nil +} + +func (x *HedgePolicy) GetAdditionalRequestChance() *v33.FractionalPercent { + if x != nil { + return x.AdditionalRequestChance + } + return nil +} + +func (x *HedgePolicy) GetHedgeOnPerTryTimeout() bool { + if x != nil { + return x.HedgeOnPerTryTimeout + } + return false +} + +// [#next-free-field: 10] +type RedirectAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is “http“ and the port is explicitly + // set to “:80“, the port will be removed after the redirection + // 2. If the source URI scheme is “https“ and the port is explicitly + // set to “:443“, the port will be removed after the redirection + // + // Types that are assignable to SchemeRewriteSpecifier: + // + // *RedirectAction_HttpsRedirect + // *RedirectAction_SchemeRedirect + SchemeRewriteSpecifier isRedirectAction_SchemeRewriteSpecifier `protobuf_oneof:"scheme_rewrite_specifier"` + // The host portion of the URL will be swapped with this value. + HostRedirect string `protobuf:"bytes,1,opt,name=host_redirect,json=hostRedirect,proto3" json:"host_redirect,omitempty"` + // The port value of the URL will be swapped with this value. + PortRedirect uint32 `protobuf:"varint,8,opt,name=port_redirect,json=portRedirect,proto3" json:"port_redirect,omitempty"` + // Types that are assignable to PathRewriteSpecifier: + // + // *RedirectAction_PathRedirect + // *RedirectAction_PrefixRewrite + // *RedirectAction_RegexRewrite + PathRewriteSpecifier isRedirectAction_PathRewriteSpecifier `protobuf_oneof:"path_rewrite_specifier"` + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + ResponseCode RedirectAction_RedirectResponseCode `protobuf:"varint,3,opt,name=response_code,json=responseCode,proto3,enum=envoy.config.route.v3.RedirectAction_RedirectResponseCode" json:"response_code,omitempty"` + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. 
+ StripQuery bool `protobuf:"varint,6,opt,name=strip_query,json=stripQuery,proto3" json:"strip_query,omitempty"` +} + +func (x *RedirectAction) Reset() { + *x = RedirectAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RedirectAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RedirectAction) ProtoMessage() {} + +func (x *RedirectAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RedirectAction.ProtoReflect.Descriptor instead. +func (*RedirectAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11} +} + +func (m *RedirectAction) GetSchemeRewriteSpecifier() isRedirectAction_SchemeRewriteSpecifier { + if m != nil { + return m.SchemeRewriteSpecifier + } + return nil +} + +func (x *RedirectAction) GetHttpsRedirect() bool { + if x, ok := x.GetSchemeRewriteSpecifier().(*RedirectAction_HttpsRedirect); ok { + return x.HttpsRedirect + } + return false +} + +func (x *RedirectAction) GetSchemeRedirect() string { + if x, ok := x.GetSchemeRewriteSpecifier().(*RedirectAction_SchemeRedirect); ok { + return x.SchemeRedirect + } + return "" +} + +func (x *RedirectAction) GetHostRedirect() string { + if x != nil { + return x.HostRedirect + } + return "" +} + +func (x *RedirectAction) GetPortRedirect() uint32 { + if x != nil { + return x.PortRedirect + } + return 0 +} + +func (m *RedirectAction) GetPathRewriteSpecifier() isRedirectAction_PathRewriteSpecifier { + if m != nil { + return m.PathRewriteSpecifier + } + return nil +} + +func (x *RedirectAction) GetPathRedirect() string { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_PathRedirect); ok { + return x.PathRedirect + } + return "" +} + +func (x *RedirectAction) GetPrefixRewrite() string { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_PrefixRewrite); ok { + return x.PrefixRewrite + } + return "" +} + +func (x *RedirectAction) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_RegexRewrite); ok { + return x.RegexRewrite + } + return nil +} + +func (x *RedirectAction) GetResponseCode() RedirectAction_RedirectResponseCode { + if x != nil { + return x.ResponseCode + } + return RedirectAction_MOVED_PERMANENTLY +} + +func (x *RedirectAction) GetStripQuery() bool { + if x != nil { + return x.StripQuery + } + return false +} + +type isRedirectAction_SchemeRewriteSpecifier interface { + isRedirectAction_SchemeRewriteSpecifier() +} + +type RedirectAction_HttpsRedirect struct { + // The scheme portion of the URL will be swapped with "https". + HttpsRedirect bool `protobuf:"varint,4,opt,name=https_redirect,json=httpsRedirect,proto3,oneof"` +} + +type RedirectAction_SchemeRedirect struct { + // The scheme portion of the URL will be swapped with this value. 
+ SchemeRedirect string `protobuf:"bytes,7,opt,name=scheme_redirect,json=schemeRedirect,proto3,oneof"` +} + +func (*RedirectAction_HttpsRedirect) isRedirectAction_SchemeRewriteSpecifier() {} + +func (*RedirectAction_SchemeRedirect) isRedirectAction_SchemeRewriteSpecifier() {} + +type isRedirectAction_PathRewriteSpecifier interface { + isRedirectAction_PathRewriteSpecifier() +} + +type RedirectAction_PathRedirect struct { + // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" + PathRedirect string `protobuf:"bytes,2,opt,name=path_redirect,json=pathRedirect,proto3,oneof"` +} + +type RedirectAction_PrefixRewrite struct { + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. + PrefixRewrite string `protobuf:"bytes,5,opt,name=prefix_rewrite,json=prefixRewrite,proto3,oneof"` +} + +type RedirectAction_RegexRewrite struct { + // Indicates that during redirect, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. + // + // Examples using Google's `RE2 `_ engine: + // + // - The path pattern “^/service/([^/]+)(/.*)$“ paired with a substitution + // string of “\2/instance/\1“ would transform “/service/foo/v1/api“ + // into “/v1/api/instance/foo“. + // + // - The pattern “one“ paired with a substitution string of “two“ would + // transform “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/two/zzz“. + // + // - The pattern “^(.*?)one(.*)$“ paired with a substitution string of + // “\1two\2“ would replace only the first occurrence of “one“, + // transforming path “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/one/zzz“. + // + // - The pattern “(?i)/xxx/“ paired with a substitution string of “/yyy/“ + // would do a case-insensitive match and transform path “/aaa/XxX/bbb“ to + // “/aaa/yyy/bbb“. 
+ RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,9,opt,name=regex_rewrite,json=regexRewrite,proto3,oneof"` +} + +func (*RedirectAction_PathRedirect) isRedirectAction_PathRewriteSpecifier() {} + +func (*RedirectAction_PrefixRewrite) isRedirectAction_PathRewriteSpecifier() {} + +func (*RedirectAction_RegexRewrite) isRedirectAction_PathRewriteSpecifier() {} + +type DirectResponseAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the HTTP response status to be returned. + Status uint32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using ``response_headers_to_add`` in the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. + Body *v31.DataSource `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *DirectResponseAction) Reset() { + *x = DirectResponseAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DirectResponseAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DirectResponseAction) ProtoMessage() {} + +func (x *DirectResponseAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DirectResponseAction.ProtoReflect.Descriptor instead. +func (*DirectResponseAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{12} +} + +func (x *DirectResponseAction) GetStatus() uint32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *DirectResponseAction) GetBody() *v31.DataSource { + if x != nil { + return x.Body + } + return nil +} + +// [#not-implemented-hide:] +type NonForwardingAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NonForwardingAction) Reset() { + *x = NonForwardingAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NonForwardingAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NonForwardingAction) ProtoMessage() {} + +func (x *NonForwardingAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NonForwardingAction.ProtoReflect.Descriptor instead. 
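+
+// Editor's note: an illustrative sketch (not generated output) of the
+// RedirectAction defined above, combining its two oneofs: force HTTPS and swap
+// the matched prefix, keeping the default 301 response code. The prefix value
+// is an assumption made up for the example.
+//
+//	redirect := &RedirectAction{
+//		SchemeRewriteSpecifier: &RedirectAction_HttpsRedirect{HttpsRedirect: true},
+//		PathRewriteSpecifier:   &RedirectAction_PrefixRewrite{PrefixRewrite: "/v2"},
+//		ResponseCode:           RedirectAction_MOVED_PERMANENTLY,
+//		StripQuery:             false,
+//	}
+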
+func (*NonForwardingAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{13} +} + +type Decorator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` + // Whether the decorated details should be propagated to the other party. The default is true. + Propagate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=propagate,proto3" json:"propagate,omitempty"` +} + +func (x *Decorator) Reset() { + *x = Decorator{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decorator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decorator) ProtoMessage() {} + +func (x *Decorator) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decorator.ProtoReflect.Descriptor instead. +func (*Decorator) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{14} +} + +func (x *Decorator) GetOperation() string { + if x != nil { + return x.Operation + } + return "" +} + +func (x *Decorator) GetPropagate() *wrapperspb.BoolValue { + if x != nil { + return x.Propagate + } + return nil +} + +type Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + ClientSampling *v33.FractionalPercent `protobuf:"bytes,1,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + RandomSampling *v33.FractionalPercent `protobuf:"bytes,2,opt,name=random_sampling,json=randomSampling,proto3" json:"random_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. 
For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + OverallSampling *v33.FractionalPercent `protobuf:"bytes,3,opt,name=overall_sampling,json=overallSampling,proto3" json:"overall_sampling,omitempty"` + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. + CustomTags []*v34.CustomTag `protobuf:"bytes,4,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty"` +} + +func (x *Tracing) Reset() { + *x = Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing) ProtoMessage() {} + +func (x *Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing.ProtoReflect.Descriptor instead. +func (*Tracing) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{15} +} + +func (x *Tracing) GetClientSampling() *v33.FractionalPercent { + if x != nil { + return x.ClientSampling + } + return nil +} + +func (x *Tracing) GetRandomSampling() *v33.FractionalPercent { + if x != nil { + return x.RandomSampling + } + return nil +} + +func (x *Tracing) GetOverallSampling() *v33.FractionalPercent { + if x != nil { + return x.OverallSampling + } + return nil +} + +func (x *Tracing) GetCustomTags() []*v34.CustomTag { + if x != nil { + return x.CustomTags + } + return nil +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. 
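+//
+// Editor's note, for illustration only: a virtual cluster that collects
+// statistics for POST requests to a hypothetical ``/login`` endpoint could be
+// built from the messages in this file as follows (the exact-match specifier
+// comes from the ``v32`` type/matcher/v3 package imported here):
+//
+//	vc := &VirtualCluster{
+//		Name: "login",
+//		Headers: []*HeaderMatcher{
+//			{Name: ":path", HeaderMatchSpecifier: &HeaderMatcher_StringMatch{StringMatch: &v32.StringMatcher{MatchPattern: &v32.StringMatcher_Exact{Exact: "/login"}}}},
+//			{Name: ":method", HeaderMatchSpecifier: &HeaderMatcher_StringMatch{StringMatch: &v32.StringMatcher{MatchPattern: &v32.StringMatcher_Exact{Exact: "POST"}}}},
+//		},
+//	}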
+type VirtualCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers “:path“ and “:method“ can be used to match the request path and + // method, respectively. + Headers []*HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *VirtualCluster) Reset() { + *x = VirtualCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualCluster) ProtoMessage() {} + +func (x *VirtualCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualCluster.ProtoReflect.Descriptor instead. +func (*VirtualCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16} +} + +func (x *VirtualCluster) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *VirtualCluster) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Global rate limiting :ref:`architecture overview `. +// Also applies to Local rate limiting :ref:`using descriptors `. +type RateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + Stage *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=stage,proto3" json:"stage,omitempty"` + // The key to be set in runtime to disable this rate limit configuration. + DisableKey string `protobuf:"bytes,2,opt,name=disable_key,json=disableKey,proto3" json:"disable_key,omitempty"` + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + Actions []*RateLimit_Action `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. 
+ Limit *RateLimit_Override `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *RateLimit) Reset() { + *x = RateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit) ProtoMessage() {} + +func (x *RateLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit.ProtoReflect.Descriptor instead. +func (*RateLimit) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17} +} + +func (x *RateLimit) GetStage() *wrapperspb.UInt32Value { + if x != nil { + return x.Stage + } + return nil +} + +func (x *RateLimit) GetDisableKey() string { + if x != nil { + return x.DisableKey + } + return "" +} + +func (x *RateLimit) GetActions() []*RateLimit_Action { + if x != nil { + return x.Actions + } + return nil +} + +func (x *RateLimit) GetLimit() *RateLimit_Override { + if x != nil { + return x.Limit + } + return nil +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` +// header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "string_match": { +// "exact": "POST" +// } +// } +// +// .. attention:: +// +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// +// [#next-free-field: 15] +type HeaderMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the name of the header in the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Specifies how the header match will be performed to route the request. + // + // Types that are assignable to HeaderMatchSpecifier: + // + // *HeaderMatcher_ExactMatch + // *HeaderMatcher_SafeRegexMatch + // *HeaderMatcher_RangeMatch + // *HeaderMatcher_PresentMatch + // *HeaderMatcher_PrefixMatch + // *HeaderMatcher_SuffixMatch + // *HeaderMatcher_ContainsMatch + // *HeaderMatcher_StringMatch + HeaderMatchSpecifier isHeaderMatcher_HeaderMatchSpecifier `protobuf_oneof:"header_match_specifier"` + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex “\d{3}“ does not match the value “1234“, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. 
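+ //
+ // A further illustration (editor's sketch, hypothetical header name): the
+ // matcher below succeeds only when ``x-canary`` is absent, because the
+ // present_match result is inverted.
+ //
+ //	hm := &HeaderMatcher{
+ //		Name:                 "x-canary",
+ //		HeaderMatchSpecifier: &HeaderMatcher_PresentMatch{PresentMatch: true},
+ //		InvertMatch:          true,
+ //	}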
+ InvertMatch bool `protobuf:"varint,8,opt,name=invert_match,json=invertMatch,proto3" json:"invert_match,omitempty"` + // If specified, for any header match rule, if the header match rule specified header + // does not exist, this header value will be treated as empty. Defaults to false. + // + // Examples: + // + // - The header match rule specified header "header1" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to true; The "header1" header is not present. The match rule will + // treat the "header1" as an empty header. The empty header does not match the range, + // so it will match when inverted. + // - The header match rule specified header "header2" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to false; The "header2" header is not present and the header + // matcher rule for "header2" will be ignored so it will not match. + // - The header match rule specified header "header3" to a string regex match + // “^$“ which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to true; The "header3" header is not present. + // The match rule will treat the "header3" header as an empty header so it will match. + // - The header match rule specified header "header4" to a string regex match + // “^$“ which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to false; The "header4" header is not present. + // The match rule for "header4" will be ignored so it will not match. + TreatMissingHeaderAsEmpty bool `protobuf:"varint,14,opt,name=treat_missing_header_as_empty,json=treatMissingHeaderAsEmpty,proto3" json:"treat_missing_header_as_empty,omitempty"` +} + +func (x *HeaderMatcher) Reset() { + *x = HeaderMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderMatcher) ProtoMessage() {} + +func (x *HeaderMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderMatcher.ProtoReflect.Descriptor instead. +func (*HeaderMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{18} +} + +func (x *HeaderMatcher) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HeaderMatcher) GetHeaderMatchSpecifier() isHeaderMatcher_HeaderMatchSpecifier { + if m != nil { + return m.HeaderMatchSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetExactMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_ExactMatch); ok { + return x.ExactMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
+func (x *HeaderMatcher) GetSafeRegexMatch() *v32.RegexMatcher { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_SafeRegexMatch); ok { + return x.SafeRegexMatch + } + return nil +} + +func (x *HeaderMatcher) GetRangeMatch() *v33.Int64Range { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_RangeMatch); ok { + return x.RangeMatch + } + return nil +} + +func (x *HeaderMatcher) GetPresentMatch() bool { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetPrefixMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_PrefixMatch); ok { + return x.PrefixMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetSuffixMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_SuffixMatch); ok { + return x.SuffixMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetContainsMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_ContainsMatch); ok { + return x.ContainsMatch + } + return "" +} + +func (x *HeaderMatcher) GetStringMatch() *v32.StringMatcher { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *HeaderMatcher) GetInvertMatch() bool { + if x != nil { + return x.InvertMatch + } + return false +} + +func (x *HeaderMatcher) GetTreatMissingHeaderAsEmpty() bool { + if x != nil { + return x.TreatMissingHeaderAsEmpty + } + return false +} + +type isHeaderMatcher_HeaderMatchSpecifier interface { + isHeaderMatcher_HeaderMatchSpecifier() +} + +type HeaderMatcher_ExactMatch struct { + // If specified, header match will be performed based on the value of the header. + // This field is deprecated. Please use :ref:`string_match `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + ExactMatch string `protobuf:"bytes,4,opt,name=exact_match,json=exactMatch,proto3,oneof"` +} + +type HeaderMatcher_SafeRegexMatch struct { + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + // This field is deprecated. Please use :ref:`string_match `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + SafeRegexMatch *v32.RegexMatcher `protobuf:"bytes,11,opt,name=safe_regex_match,json=safeRegexMatch,proto3,oneof"` +} + +type HeaderMatcher_RangeMatch struct { + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. 
+ // + // Examples: + // + // - For range [-10,0), route will match for header value -1, but not for 0, “somestring“, 10.9, + // “-1somestring“ + RangeMatch *v33.Int64Range `protobuf:"bytes,6,opt,name=range_match,json=rangeMatch,proto3,oneof"` +} + +type HeaderMatcher_PresentMatch struct { + // If specified as true, header match will be performed based on whether the header is in the + // request. If specified as false, header match will be performed based on whether the header is absent. + PresentMatch bool `protobuf:"varint,7,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +type HeaderMatcher_PrefixMatch struct { + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The prefix “abcd“ matches the value “abcdxyz“, but not for “abcxyz“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + PrefixMatch string `protobuf:"bytes,9,opt,name=prefix_match,json=prefixMatch,proto3,oneof"` +} + +type HeaderMatcher_SuffixMatch struct { + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The suffix “abcd“ matches the value “xyzabcd“, but not for “xyzbcd“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + SuffixMatch string `protobuf:"bytes,10,opt,name=suffix_match,json=suffixMatch,proto3,oneof"` +} + +type HeaderMatcher_ContainsMatch struct { + // If specified, header match will be performed based on whether the header value contains + // the given value or not. + // Note: empty contains match is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The value “abcd“ matches the value “xyzabcdpqr“, but not for “xyzbcdpqr“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + ContainsMatch string `protobuf:"bytes,12,opt,name=contains_match,json=containsMatch,proto3,oneof"` +} + +type HeaderMatcher_StringMatch struct { + // If specified, header match will be performed based on the string match of the header value. + StringMatch *v32.StringMatcher `protobuf:"bytes,13,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +func (*HeaderMatcher_ExactMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_SafeRegexMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_RangeMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_PresentMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_PrefixMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_SuffixMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_ContainsMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_StringMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. 
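+//
+// Editor's note, for illustration only: a matcher requiring the hypothetical
+// query parameter ``debug`` to be present, regardless of its value, could be
+// built as
+//
+//	qp := &QueryParameterMatcher{
+//		Name:                         "debug",
+//		QueryParameterMatchSpecifier: &QueryParameterMatcher_PresentMatch{PresentMatch: true},
+//	}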
+// [#next-free-field: 7] +type QueryParameterMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the name of a key that must be present in the requested + // “path“'s query string. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to QueryParameterMatchSpecifier: + // + // *QueryParameterMatcher_StringMatch + // *QueryParameterMatcher_PresentMatch + QueryParameterMatchSpecifier isQueryParameterMatcher_QueryParameterMatchSpecifier `protobuf_oneof:"query_parameter_match_specifier"` +} + +func (x *QueryParameterMatcher) Reset() { + *x = QueryParameterMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterMatcher) ProtoMessage() {} + +func (x *QueryParameterMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterMatcher.ProtoReflect.Descriptor instead. +func (*QueryParameterMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{19} +} + +func (x *QueryParameterMatcher) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *QueryParameterMatcher) GetQueryParameterMatchSpecifier() isQueryParameterMatcher_QueryParameterMatchSpecifier { + if m != nil { + return m.QueryParameterMatchSpecifier + } + return nil +} + +func (x *QueryParameterMatcher) GetStringMatch() *v32.StringMatcher { + if x, ok := x.GetQueryParameterMatchSpecifier().(*QueryParameterMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *QueryParameterMatcher) GetPresentMatch() bool { + if x, ok := x.GetQueryParameterMatchSpecifier().(*QueryParameterMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +type isQueryParameterMatcher_QueryParameterMatchSpecifier interface { + isQueryParameterMatcher_QueryParameterMatchSpecifier() +} + +type QueryParameterMatcher_StringMatch struct { + // Specifies whether a query parameter value should match against a string. + StringMatch *v32.StringMatcher `protobuf:"bytes,5,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type QueryParameterMatcher_PresentMatch struct { + // Specifies whether a query parameter should be present. + PresentMatch bool `protobuf:"varint,6,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +func (*QueryParameterMatcher_StringMatch) isQueryParameterMatcher_QueryParameterMatchSpecifier() {} + +func (*QueryParameterMatcher_PresentMatch) isQueryParameterMatcher_QueryParameterMatchSpecifier() {} + +// HTTP Internal Redirect :ref:`architecture overview `. +// [#next-free-field: 6] +type InternalRedirectPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. 
+ // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + RedirectResponseCodes []uint32 `protobuf:"varint,2,rep,packed,name=redirect_response_codes,json=redirectResponseCodes,proto3" json:"redirect_response_codes,omitempty"` + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + // [#extension-category: envoy.internal_redirect_predicates] + Predicates []*v31.TypedExtensionConfig `protobuf:"bytes,3,rep,name=predicates,proto3" json:"predicates,omitempty"` + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + AllowCrossSchemeRedirect bool `protobuf:"varint,4,opt,name=allow_cross_scheme_redirect,json=allowCrossSchemeRedirect,proto3" json:"allow_cross_scheme_redirect,omitempty"` + // Specifies a list of headers, by name, to copy from the internal redirect into the subsequent + // request. If a header is specified here but not present in the redirect, it will be cleared in + // the subsequent request. + ResponseHeadersToCopy []string `protobuf:"bytes,5,rep,name=response_headers_to_copy,json=responseHeadersToCopy,proto3" json:"response_headers_to_copy,omitempty"` +} + +func (x *InternalRedirectPolicy) Reset() { + *x = InternalRedirectPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalRedirectPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalRedirectPolicy) ProtoMessage() {} + +func (x *InternalRedirectPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalRedirectPolicy.ProtoReflect.Descriptor instead. 
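+
+// Editor's note: an illustrative sketch (not generated output) of an
+// InternalRedirectPolicy that follows at most three redirects, accepts 302 and
+// 307 upstream responses as redirect triggers, and copies a hypothetical header
+// into the follow-up request.
+//
+//	irp := &InternalRedirectPolicy{
+//		MaxInternalRedirects:  wrapperspb.UInt32(3),
+//		RedirectResponseCodes: []uint32{302, 307},
+//		ResponseHeadersToCopy: []string{"x-redirect-origin"},
+//	}
+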
+func (*InternalRedirectPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{20} +} + +func (x *InternalRedirectPolicy) GetMaxInternalRedirects() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInternalRedirects + } + return nil +} + +func (x *InternalRedirectPolicy) GetRedirectResponseCodes() []uint32 { + if x != nil { + return x.RedirectResponseCodes + } + return nil +} + +func (x *InternalRedirectPolicy) GetPredicates() []*v31.TypedExtensionConfig { + if x != nil { + return x.Predicates + } + return nil +} + +func (x *InternalRedirectPolicy) GetAllowCrossSchemeRedirect() bool { + if x != nil { + return x.AllowCrossSchemeRedirect + } + return false +} + +func (x *InternalRedirectPolicy) GetResponseHeadersToCopy() []string { + if x != nil { + return x.ResponseHeadersToCopy + } + return nil +} + +// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the +// map value in +// :ref:`VirtualHost.typed_per_filter_config`, +// :ref:`Route.typed_per_filter_config`, +// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` +// to add additional flags to the filter. +type FilterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter config. + Config *anypb.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // If true, the filter is optional, meaning that if the client does + // not support the specified filter, it may ignore the map entry rather + // than rejecting the config. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` + // If true, the filter is disabled in the route or virtual host and the “config“ field is ignored. + // See :ref:`route based filter chain ` + // for more details. + // + // .. note:: + // + // This field will take effect when the request arrive and filter chain is created for the request. + // If initial route is selected for the request and a filter is disabled in the initial route, then + // the filter will not be added to the filter chain. + // And if the request is mutated later and re-match to another route, the disabled filter by the + // initial route will not be added back to the filter chain because the filter chain is already + // created and it is too late to change the chain. + // + // This field only make sense for the downstream HTTP filters for now. + Disabled bool `protobuf:"varint,3,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (x *FilterConfig) Reset() { + *x = FilterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterConfig) ProtoMessage() {} + +func (x *FilterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterConfig.ProtoReflect.Descriptor instead. 
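// As a loose consumer-side sketch of the FilterConfig wrapper defined above,
// marking a per-route filter override as optional; the routev3 package alias,
// the anypb import, the filter name and the pre-built filterAny payload are
// all assumptions made for illustration:
//
//	wrapped, err := anypb.New(&routev3.FilterConfig{
//		Config:     filterAny, // the actual filter configuration, already packed as Any
//		IsOptional: true,      // clients unaware of this filter may skip the entry
//	})
//	if err != nil {
//		return err
//	}
//	perFilter := map[string]*anypb.Any{"envoy.filters.http.example": wrapped} // hypothetical filter name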
+func (*FilterConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{21} +} + +func (x *FilterConfig) GetConfig() *anypb.Any { + if x != nil { + return x.Config + } + return nil +} + +func (x *FilterConfig) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +func (x *FilterConfig) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +// [#next-free-field: 13] +type WeightedCluster_ClusterWeight struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only one of “name“ and “cluster_header“ may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Only one of “name“ and “cluster_header“ may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + ClusterHeader string `protobuf:"bytes,12,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` + // The weight of the cluster. This value is relative to the other clusters' + // weights. When a request matches the route, the choice of an upstream cluster + // is determined by its weight. The sum of weights across all + // entries in the clusters array must be greater than 0, and must not exceed + // uint32_t maximal value (4294967295). + Weight *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=weight,proto3" json:"weight,omitempty"` + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as “envoy.lb“. + MetadataMatch *v31.Metadata `protobuf:"bytes,3,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,4,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + RequestHeadersToRemove []string `protobuf:"bytes,9,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,5,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + ResponseHeadersToRemove []string `protobuf:"bytes,6,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // This field can be used to provide weighted cluster specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are assignable to HostRewriteSpecifier: + // + // *WeightedCluster_ClusterWeight_HostRewriteLiteral + HostRewriteSpecifier isWeightedCluster_ClusterWeight_HostRewriteSpecifier `protobuf_oneof:"host_rewrite_specifier"` +} + +func (x *WeightedCluster_ClusterWeight) Reset() { + *x = WeightedCluster_ClusterWeight{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WeightedCluster_ClusterWeight) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WeightedCluster_ClusterWeight) ProtoMessage() {} + +func (x *WeightedCluster_ClusterWeight) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WeightedCluster_ClusterWeight.ProtoReflect.Descriptor instead. 
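// As a rough sketch of the weighted-cluster fields above: two ClusterWeight
// entries splitting traffic 90/10, assuming the enclosing WeightedCluster
// message from this package (aliased routev3) and the wrapperspb helpers:
//
//	weighted := &routev3.WeightedCluster{
//		Clusters: []*routev3.WeightedCluster_ClusterWeight{
//			{Name: "primary", Weight: wrapperspb.UInt32(90)},
//			{Name: "canary", Weight: wrapperspb.UInt32(10)},
//		},
//	}
//
// As noted in the Weight comment above, the weights must sum to a value
// greater than zero and below the uint32 maximum.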
+func (*WeightedCluster_ClusterWeight) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *WeightedCluster_ClusterWeight) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *WeightedCluster_ClusterWeight) GetClusterHeader() string { + if x != nil { + return x.ClusterHeader + } + return "" +} + +func (x *WeightedCluster_ClusterWeight) GetWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.Weight + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetMetadataMatch() *v31.Metadata { + if x != nil { + return x.MetadataMatch + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (m *WeightedCluster_ClusterWeight) GetHostRewriteSpecifier() isWeightedCluster_ClusterWeight_HostRewriteSpecifier { + if m != nil { + return m.HostRewriteSpecifier + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetHostRewriteLiteral() string { + if x, ok := x.GetHostRewriteSpecifier().(*WeightedCluster_ClusterWeight_HostRewriteLiteral); ok { + return x.HostRewriteLiteral + } + return "" +} + +type isWeightedCluster_ClusterWeight_HostRewriteSpecifier interface { + isWeightedCluster_ClusterWeight_HostRewriteSpecifier() +} + +type WeightedCluster_ClusterWeight_HostRewriteLiteral struct { + // Indicates that during forwarding, the host header will be swapped with + // this value. + HostRewriteLiteral string `protobuf:"bytes,11,opt,name=host_rewrite_literal,json=hostRewriteLiteral,proto3,oneof"` +} + +func (*WeightedCluster_ClusterWeight_HostRewriteLiteral) isWeightedCluster_ClusterWeight_HostRewriteSpecifier() { +} + +type RouteMatch_GrpcRouteMatchOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RouteMatch_GrpcRouteMatchOptions) Reset() { + *x = RouteMatch_GrpcRouteMatchOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_GrpcRouteMatchOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_GrpcRouteMatchOptions) ProtoMessage() {} + +func (x *RouteMatch_GrpcRouteMatchOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_GrpcRouteMatchOptions.ProtoReflect.Descriptor instead. 
+func (*RouteMatch_GrpcRouteMatchOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 0} +} + +type RouteMatch_TlsContextMatchOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + Presented *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=presented,proto3" json:"presented,omitempty"` + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + // + // .. warning:: + // + // Client certificate validation is not currently performed upon TLS session resumption. For + // a resumed TLS session the route will match only when ``validated`` is false, regardless of + // whether the client TLS certificate is valid. + // + // The only known workaround for this issue is to disable TLS session resumption entirely, by + // setting both :ref:`disable_stateless_session_resumption ` + // and :ref:`disable_stateful_session_resumption ` on the DownstreamTlsContext. + Validated *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=validated,proto3" json:"validated,omitempty"` +} + +func (x *RouteMatch_TlsContextMatchOptions) Reset() { + *x = RouteMatch_TlsContextMatchOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_TlsContextMatchOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_TlsContextMatchOptions) ProtoMessage() {} + +func (x *RouteMatch_TlsContextMatchOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_TlsContextMatchOptions.ProtoReflect.Descriptor instead. +func (*RouteMatch_TlsContextMatchOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *RouteMatch_TlsContextMatchOptions) GetPresented() *wrapperspb.BoolValue { + if x != nil { + return x.Presented + } + return nil +} + +func (x *RouteMatch_TlsContextMatchOptions) GetValidated() *wrapperspb.BoolValue { + if x != nil { + return x.Validated + } + return nil +} + +// An extensible message for matching CONNECT or CONNECT-UDP requests. 
+type RouteMatch_ConnectMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RouteMatch_ConnectMatcher) Reset() { + *x = RouteMatch_ConnectMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_ConnectMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_ConnectMatcher) ProtoMessage() {} + +func (x *RouteMatch_ConnectMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_ConnectMatcher.ProtoReflect.Descriptor instead. +func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 2} +} + +// The router is capable of shadowing traffic from one cluster to another. The current +// implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to +// respond before returning the response from the primary cluster. All normal statistics are +// collected for the shadow cluster making this feature useful for testing. +// +// During shadowing, the host/authority header is altered such that “-shadow“ is appended. This is +// useful for logging. For example, “cluster1“ becomes “cluster1-shadow“. This behavior can be +// disabled by setting “disable_shadow_host_suffix_append“ to “true“. +// +// .. note:: +// +// Shadowing will not be triggered if the primary cluster does not exist. +// +// .. note:: +// +// Shadowing doesn't support Http CONNECT and upgrades. +// +// [#next-free-field: 7] +type RouteAction_RequestMirrorPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only one of “cluster“ and “cluster_header“ can be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // Only one of “cluster“ and “cluster_header“ can be specified. + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. Only the first value in header is used, + // and no shadow request will happen if the value is not found in headers. Envoy will not wait for + // the shadow cluster to respond before returning the response from the primary cluster. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + ClusterHeader string `protobuf:"bytes,5,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` + // If not specified, all requests to the target cluster will be mirrored. 
+ // + // If specified, this field takes precedence over the “runtime_key“ field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + RuntimeFraction *v31.RuntimeFractionalPercent `protobuf:"bytes,3,opt,name=runtime_fraction,json=runtimeFraction,proto3" json:"runtime_fraction,omitempty"` + // Determines if the trace span should be sampled. Defaults to true. + TraceSampled *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=trace_sampled,json=traceSampled,proto3" json:"trace_sampled,omitempty"` + // Disables appending the “-shadow“ suffix to the shadowed “Host“ header. Defaults to “false“. + DisableShadowHostSuffixAppend bool `protobuf:"varint,6,opt,name=disable_shadow_host_suffix_append,json=disableShadowHostSuffixAppend,proto3" json:"disable_shadow_host_suffix_append,omitempty"` +} + +func (x *RouteAction_RequestMirrorPolicy) Reset() { + *x = RouteAction_RequestMirrorPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_RequestMirrorPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_RequestMirrorPolicy) ProtoMessage() {} + +func (x *RouteAction_RequestMirrorPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_RequestMirrorPolicy.ProtoReflect.Descriptor instead. +func (*RouteAction_RequestMirrorPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *RouteAction_RequestMirrorPolicy) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *RouteAction_RequestMirrorPolicy) GetClusterHeader() string { + if x != nil { + return x.ClusterHeader + } + return "" +} + +func (x *RouteAction_RequestMirrorPolicy) GetRuntimeFraction() *v31.RuntimeFractionalPercent { + if x != nil { + return x.RuntimeFraction + } + return nil +} + +func (x *RouteAction_RequestMirrorPolicy) GetTraceSampled() *wrapperspb.BoolValue { + if x != nil { + return x.TraceSampled + } + return nil +} + +func (x *RouteAction_RequestMirrorPolicy) GetDisableShadowHostSuffixAppend() bool { + if x != nil { + return x.DisableShadowHostSuffixAppend + } + return false +} + +// Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer +// `. +// [#next-free-field: 7] +type RouteAction_HashPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PolicySpecifier: + // + // *RouteAction_HashPolicy_Header_ + // *RouteAction_HashPolicy_Cookie_ + // *RouteAction_HashPolicy_ConnectionProperties_ + // *RouteAction_HashPolicy_QueryParameter_ + // *RouteAction_HashPolicy_FilterState_ + PolicySpecifier isRouteAction_HashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"` + // The flag that short-circuits the hash computing. 
This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` +} + +func (x *RouteAction_HashPolicy) Reset() { + *x = RouteAction_HashPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy) ProtoMessage() {} + +func (x *RouteAction_HashPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} +} + +func (m *RouteAction_HashPolicy) GetPolicySpecifier() isRouteAction_HashPolicy_PolicySpecifier { + if m != nil { + return m.PolicySpecifier + } + return nil +} + +func (x *RouteAction_HashPolicy) GetHeader() *RouteAction_HashPolicy_Header { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_Header_); ok { + return x.Header + } + return nil +} + +func (x *RouteAction_HashPolicy) GetCookie() *RouteAction_HashPolicy_Cookie { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_Cookie_); ok { + return x.Cookie + } + return nil +} + +func (x *RouteAction_HashPolicy) GetConnectionProperties() *RouteAction_HashPolicy_ConnectionProperties { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_ConnectionProperties_); ok { + return x.ConnectionProperties + } + return nil +} + +func (x *RouteAction_HashPolicy) GetQueryParameter() *RouteAction_HashPolicy_QueryParameter { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_QueryParameter_); ok { + return x.QueryParameter + } + return nil +} + +func (x *RouteAction_HashPolicy) GetFilterState() *RouteAction_HashPolicy_FilterState { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_FilterState_); ok { + return x.FilterState + } + return nil +} + +func (x *RouteAction_HashPolicy) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +type isRouteAction_HashPolicy_PolicySpecifier interface { + isRouteAction_HashPolicy_PolicySpecifier() +} + +type RouteAction_HashPolicy_Header_ struct { + // Header hash policy. + Header *RouteAction_HashPolicy_Header `protobuf:"bytes,1,opt,name=header,proto3,oneof"` +} + +type RouteAction_HashPolicy_Cookie_ struct { + // Cookie hash policy. 
+ Cookie *RouteAction_HashPolicy_Cookie `protobuf:"bytes,2,opt,name=cookie,proto3,oneof"` +} + +type RouteAction_HashPolicy_ConnectionProperties_ struct { + // Connection properties hash policy. + ConnectionProperties *RouteAction_HashPolicy_ConnectionProperties `protobuf:"bytes,3,opt,name=connection_properties,json=connectionProperties,proto3,oneof"` +} + +type RouteAction_HashPolicy_QueryParameter_ struct { + // Query parameter hash policy. + QueryParameter *RouteAction_HashPolicy_QueryParameter `protobuf:"bytes,5,opt,name=query_parameter,json=queryParameter,proto3,oneof"` +} + +type RouteAction_HashPolicy_FilterState_ struct { + // Filter state hash policy. + FilterState *RouteAction_HashPolicy_FilterState `protobuf:"bytes,6,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +func (*RouteAction_HashPolicy_Header_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_Cookie_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_ConnectionProperties_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_QueryParameter_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_FilterState_) isRouteAction_HashPolicy_PolicySpecifier() {} + +// Allows enabling and disabling upgrades on a per-route basis. +// This overrides any enabled/disabled upgrade filter chain specified in the +// HttpConnectionManager +// :ref:`upgrade_configs +// ` +// but does not affect any custom filter chain specified there. +type RouteAction_UpgradeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + UpgradeType string `protobuf:"bytes,1,opt,name=upgrade_type,json=upgradeType,proto3" json:"upgrade_type,omitempty"` + // Determines if upgrades are available on this route. Defaults to true. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectConfig *RouteAction_UpgradeConfig_ConnectConfig `protobuf:"bytes,3,opt,name=connect_config,json=connectConfig,proto3" json:"connect_config,omitempty"` +} + +func (x *RouteAction_UpgradeConfig) Reset() { + *x = RouteAction_UpgradeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_UpgradeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_UpgradeConfig) ProtoMessage() {} + +func (x *RouteAction_UpgradeConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_UpgradeConfig.ProtoReflect.Descriptor instead. 
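// A small sketch of the UpgradeConfig defined above, enabling websocket
// upgrades on a route; the routev3 alias and wrapperspb import are assumed:
//
//	upgrade := &routev3.RouteAction_UpgradeConfig{
//		UpgradeType: "websocket",           // matched case-insensitively against the Upgrade header
//		Enabled:     wrapperspb.Bool(true), // defaults to true when unset
//	}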
+func (*RouteAction_UpgradeConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *RouteAction_UpgradeConfig) GetUpgradeType() string { + if x != nil { + return x.UpgradeType + } + return "" +} + +func (x *RouteAction_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *RouteAction_UpgradeConfig) GetConnectConfig() *RouteAction_UpgradeConfig_ConnectConfig { + if x != nil { + return x.ConnectConfig + } + return nil +} + +type RouteAction_MaxStreamDuration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // “max_stream_duration“, but limit the applied timeout to the maximum value specified here. + // If set to 0, the “grpc-timeout“ header is used without modification. + GrpcTimeoutHeaderMax *durationpb.Duration `protobuf:"bytes,2,opt,name=grpc_timeout_header_max,json=grpcTimeoutHeaderMax,proto3" json:"grpc_timeout_header_max,omitempty"` + // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + GrpcTimeoutHeaderOffset *durationpb.Duration `protobuf:"bytes,3,opt,name=grpc_timeout_header_offset,json=grpcTimeoutHeaderOffset,proto3" json:"grpc_timeout_header_offset,omitempty"` +} + +func (x *RouteAction_MaxStreamDuration) Reset() { + *x = RouteAction_MaxStreamDuration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_MaxStreamDuration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_MaxStreamDuration) ProtoMessage() {} + +func (x *RouteAction_MaxStreamDuration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_MaxStreamDuration.ProtoReflect.Descriptor instead. 
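// A brief sketch of the MaxStreamDuration message defined above: cap a
// client-supplied grpc-timeout at five seconds and shave 100ms off it so the
// proxy times out before the caller; the routev3 alias plus the time and
// durationpb imports are assumed:
//
//	msd := &routev3.RouteAction_MaxStreamDuration{
//		GrpcTimeoutHeaderMax:    durationpb.New(5 * time.Second),
//		GrpcTimeoutHeaderOffset: durationpb.New(100 * time.Millisecond),
//	}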
+func (*RouteAction_MaxStreamDuration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 3} +} + +func (x *RouteAction_MaxStreamDuration) GetMaxStreamDuration() *durationpb.Duration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderMax() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutHeaderMax + } + return nil +} + +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderOffset() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutHeaderOffset + } + return nil +} + +type RouteAction_HashPolicy_Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` + // If specified, the request header value will be rewritten and used + // to produce the hash key. + RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,2,opt,name=regex_rewrite,json=regexRewrite,proto3" json:"regex_rewrite,omitempty"` +} + +func (x *RouteAction_HashPolicy_Header) Reset() { + *x = RouteAction_HashPolicy_Header{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_Header) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_Header) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_Header.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_Header) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 0} +} + +func (x *RouteAction_HashPolicy_Header) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +func (x *RouteAction_HashPolicy_Header) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x != nil { + return x.RegexRewrite + } + return nil +} + +// CookieAttribute defines an API for adding additional attributes for a HTTP cookie. +type RouteAction_HashPolicy_CookieAttribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cookie attribute. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The optional value of the cookie attribute. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *RouteAction_HashPolicy_CookieAttribute) Reset() { + *x = RouteAction_HashPolicy_CookieAttribute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_CookieAttribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_CookieAttribute) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_CookieAttribute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_CookieAttribute.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_CookieAttribute) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 1} +} + +func (x *RouteAction_HashPolicy_CookieAttribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteAction_HashPolicy_CookieAttribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Envoy supports two types of cookie affinity: +// +// 1. Passive. Envoy takes a cookie that's present in the cookies header and +// hashes on its value. +// +// 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) +// on the first request from the client in its response to the client, +// based on the endpoint the request gets sent to. The client then +// presents this on the next and all subsequent requests. The hash of +// this is sufficient to ensure these requests get sent to the same +// endpoint. The cookie is generated by hashing the source and +// destination ports and addresses so that multiple independent HTTP2 +// streams on the same connection will independently receive the same +// cookie, even if they arrive at the Envoy simultaneously. +type RouteAction_HashPolicy_Cookie struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` + // Additional attributes for the cookie. They will be used when generating a new cookie. 
+ Attributes []*RouteAction_HashPolicy_CookieAttribute `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (x *RouteAction_HashPolicy_Cookie) Reset() { + *x = RouteAction_HashPolicy_Cookie{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_Cookie) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_Cookie) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_Cookie) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_Cookie.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_Cookie) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 2} +} + +func (x *RouteAction_HashPolicy_Cookie) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteAction_HashPolicy_Cookie) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *RouteAction_HashPolicy_Cookie) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *RouteAction_HashPolicy_Cookie) GetAttributes() []*RouteAction_HashPolicy_CookieAttribute { + if x != nil { + return x.Attributes + } + return nil +} + +type RouteAction_HashPolicy_ConnectionProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Hash on source IP address. + SourceIp bool `protobuf:"varint,1,opt,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"` +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) Reset() { + *x = RouteAction_HashPolicy_ConnectionProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_ConnectionProperties) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_ConnectionProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_ConnectionProperties.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_ConnectionProperties) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 3} +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) GetSourceIp() bool { + if x != nil { + return x.SourceIp + } + return false +} + +type RouteAction_HashPolicy_QueryParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the URL query parameter that will be used to obtain the hash + // key. 
If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. If query parameters are repeated, only + // the first value will be considered. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *RouteAction_HashPolicy_QueryParameter) Reset() { + *x = RouteAction_HashPolicy_QueryParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_QueryParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_QueryParameter) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_QueryParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_QueryParameter.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_QueryParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 4} +} + +func (x *RouteAction_HashPolicy_QueryParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type RouteAction_HashPolicy_FilterState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Object in the per-request filterState, which is an + // Envoy::Hashable object. If there is no data associated with the key, + // or the stored object is not Envoy::Hashable, no hash will be produced. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *RouteAction_HashPolicy_FilterState) Reset() { + *x = RouteAction_HashPolicy_FilterState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_FilterState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_FilterState) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_FilterState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_FilterState.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_FilterState) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 5} +} + +func (x *RouteAction_HashPolicy_FilterState) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Configuration for sending data upstream as a raw data payload. This is used for +// CONNECT or POST requests, when forwarding request payload as raw TCP. 
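// As a rough sketch, a route terminating CONNECT could combine the upgrade
// messages in this file, using the ConnectConfig defined just below
// (routev3 alias assumed):
//
//	connectUpgrade := &routev3.RouteAction_UpgradeConfig{
//		UpgradeType: "CONNECT",
//		ConnectConfig: &routev3.RouteAction_UpgradeConfig_ConnectConfig{
//			AllowPost: true, // additionally forward POST bodies as raw TCP
//		},
//	}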
+type RouteAction_UpgradeConfig_ConnectConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + ProxyProtocolConfig *v31.ProxyProtocolConfig `protobuf:"bytes,1,opt,name=proxy_protocol_config,json=proxyProtocolConfig,proto3" json:"proxy_protocol_config,omitempty"` + // If set, the route will also allow forwarding POST payload as raw TCP. + AllowPost bool `protobuf:"varint,2,opt,name=allow_post,json=allowPost,proto3" json:"allow_post,omitempty"` +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) Reset() { + *x = RouteAction_UpgradeConfig_ConnectConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_UpgradeConfig_ConnectConfig) ProtoMessage() {} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_UpgradeConfig_ConnectConfig.ProtoReflect.Descriptor instead. +func (*RouteAction_UpgradeConfig_ConnectConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2, 0} +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) GetProxyProtocolConfig() *v31.ProxyProtocolConfig { + if x != nil { + return x.ProxyProtocolConfig + } + return nil +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) GetAllowPost() bool { + if x != nil { + return x.AllowPost + } + return false +} + +type RetryPolicy_RetryPriority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.retry_priorities] + // + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryPriority_TypedConfig + ConfigType isRetryPolicy_RetryPriority_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryPriority) Reset() { + *x = RetryPolicy_RetryPriority{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryPriority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryPriority) ProtoMessage() {} + +func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *RetryPolicy_RetryPriority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryPriority_ConfigType interface { + isRetryPolicy_RetryPriority_ConfigType() +} + +type RetryPolicy_RetryPriority_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} + +type RetryPolicy_RetryHostPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.retry_host_predicates] + // + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryHostPredicate_TypedConfig + ConfigType isRetryPolicy_RetryHostPredicate_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryHostPredicate) Reset() { + *x = RetryPolicy_RetryHostPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryHostPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} + +func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *RetryPolicy_RetryHostPredicate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHostPredicate_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryHostPredicate_ConfigType interface { + isRetryPolicy_RetryHostPredicate_ConfigType() +} + +type RetryPolicy_RetryHostPredicate_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} + +type RetryPolicy_RetryBackOff struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the “base_interval“ if set. The default is 10 times the + // “base_interval“. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *RetryPolicy_RetryBackOff) Reset() { + *x = RetryPolicy_RetryBackOff{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryBackOff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryBackOff) ProtoMessage() {} + +func (x *RetryPolicy_RetryBackOff) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryBackOff.ProtoReflect.Descriptor instead. +func (*RetryPolicy_RetryBackOff) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *RetryPolicy_RetryBackOff) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *RetryPolicy_RetryBackOff) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +type RetryPolicy_ResetHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the reset header. + // + // .. 
note:: + // + // If the header appears multiple times only the first value is used. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The format of the reset header. + Format RetryPolicy_ResetHeaderFormat `protobuf:"varint,2,opt,name=format,proto3,enum=envoy.config.route.v3.RetryPolicy_ResetHeaderFormat" json:"format,omitempty"` +} + +func (x *RetryPolicy_ResetHeader) Reset() { + *x = RetryPolicy_ResetHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_ResetHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_ResetHeader) ProtoMessage() {} + +func (x *RetryPolicy_ResetHeader) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_ResetHeader.ProtoReflect.Descriptor instead. +func (*RetryPolicy_ResetHeader) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *RetryPolicy_ResetHeader) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RetryPolicy_ResetHeader) GetFormat() RetryPolicy_ResetHeaderFormat { + if x != nil { + return x.Format + } + return RetryPolicy_SECONDS +} + +// A retry back-off strategy that applies when the upstream server rate limits +// the request. +// +// Given this configuration: +// +// .. code-block:: yaml +// +// rate_limited_retry_back_off: +// reset_headers: +// - name: Retry-After +// format: SECONDS +// - name: X-RateLimit-Reset +// format: UNIX_TIMESTAMP +// max_interval: "300s" +// +// The following algorithm will apply: +// +// 1. If the response contains the header “Retry-After“ its value must be on +// the form “120“ (an integer that represents the number of seconds to +// wait before retrying). If so, this value is used as the back-off interval. +// 2. Otherwise, if the response contains the header “X-RateLimit-Reset“ its +// value must be on the form “1595320702“ (an integer that represents the +// point in time at which to retry, as a Unix timestamp in seconds). If so, +// the current time is subtracted from this value and the result is used as +// the back-off interval. +// 3. Otherwise, Envoy will use the default +// :ref:`exponential back-off ` +// strategy. +// +// No matter which format is used, if the resulting back-off interval exceeds +// “max_interval“ it is discarded and the next header in “reset_headers“ +// is tried. If a request timeout is configured for the route it will further +// limit how long the request will be allowed to run. +// +// To prevent many clients retrying at the same point in time jitter is added +// to the back-off interval, so the resulting interval is decided by taking: +// “random(interval, interval * 1.5)“. +// +// .. attention:: +// +// Configuring ``rate_limited_retry_back_off`` will not by itself cause a request +// to be retried. You will still need to configure the right retry policy to match +// the responses from the upstream server. 
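// The YAML example above maps roughly onto the Go message defined below;
// the routev3 alias plus the time and durationpb imports are assumed:
//
//	backOff := &routev3.RetryPolicy_RateLimitedRetryBackOff{
//		ResetHeaders: []*routev3.RetryPolicy_ResetHeader{
//			{Name: "Retry-After", Format: routev3.RetryPolicy_SECONDS},
//			{Name: "X-RateLimit-Reset", Format: routev3.RetryPolicy_UNIX_TIMESTAMP},
//		},
//		MaxInterval: durationpb.New(300 * time.Second),
//	}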
+type RetryPolicy_RateLimitedRetryBackOff struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the reset headers (like “Retry-After“ or “X-RateLimit-Reset“) + // to match against the response. Headers are tried in order, and matched case + // insensitive. The first header to be parsed successfully is used. If no headers + // match the default exponential back-off is used instead. + ResetHeaders []*RetryPolicy_ResetHeader `protobuf:"bytes,1,rep,name=reset_headers,json=resetHeaders,proto3" json:"reset_headers,omitempty"` + // Specifies the maximum back off interval that Envoy will allow. If a reset + // header contains an interval longer than this then it will be discarded and + // the next header will be tried. Defaults to 300 seconds. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) Reset() { + *x = RetryPolicy_RateLimitedRetryBackOff{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RateLimitedRetryBackOff) ProtoMessage() {} + +func (x *RetryPolicy_RateLimitedRetryBackOff) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RateLimitedRetryBackOff.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RateLimitedRetryBackOff) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) GetResetHeaders() []*RetryPolicy_ResetHeader { + if x != nil { + return x.ResetHeaders + } + return nil +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +// [#next-free-field: 12] +type RateLimit_Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ActionSpecifier: + // + // *RateLimit_Action_SourceCluster_ + // *RateLimit_Action_DestinationCluster_ + // *RateLimit_Action_RequestHeaders_ + // *RateLimit_Action_RemoteAddress_ + // *RateLimit_Action_GenericKey_ + // *RateLimit_Action_HeaderValueMatch_ + // *RateLimit_Action_DynamicMetadata + // *RateLimit_Action_Metadata + // *RateLimit_Action_Extension + // *RateLimit_Action_MaskedRemoteAddress_ + // *RateLimit_Action_QueryParameterValueMatch_ + ActionSpecifier isRateLimit_Action_ActionSpecifier `protobuf_oneof:"action_specifier"` +} + +func (x *RateLimit_Action) Reset() { + *x = RateLimit_Action{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action) ProtoMessage() {} + +func (x *RateLimit_Action) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Action) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0} +} + +func (m *RateLimit_Action) GetActionSpecifier() isRateLimit_Action_ActionSpecifier { + if m != nil { + return m.ActionSpecifier + } + return nil +} + +func (x *RateLimit_Action) GetSourceCluster() *RateLimit_Action_SourceCluster { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_SourceCluster_); ok { + return x.SourceCluster + } + return nil +} + +func (x *RateLimit_Action) GetDestinationCluster() *RateLimit_Action_DestinationCluster { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_DestinationCluster_); ok { + return x.DestinationCluster + } + return nil +} + +func (x *RateLimit_Action) GetRequestHeaders() *RateLimit_Action_RequestHeaders { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_RequestHeaders_); ok { + return x.RequestHeaders + } + return nil +} + +func (x *RateLimit_Action) GetRemoteAddress() *RateLimit_Action_RemoteAddress { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_RemoteAddress_); ok { + return x.RemoteAddress + } + return nil +} + +func (x *RateLimit_Action) GetGenericKey() *RateLimit_Action_GenericKey { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_GenericKey_); ok { + return x.GenericKey + } + return nil +} + +func (x *RateLimit_Action) GetHeaderValueMatch() *RateLimit_Action_HeaderValueMatch { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_HeaderValueMatch_); ok { + return x.HeaderValueMatch + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RateLimit_Action) GetDynamicMetadata() *RateLimit_Action_DynamicMetaData { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_DynamicMetadata); ok { + return x.DynamicMetadata + } + return nil +} + +func (x *RateLimit_Action) GetMetadata() *RateLimit_Action_MetaData { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *RateLimit_Action) GetExtension() *v31.TypedExtensionConfig { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_Extension); ok { + return x.Extension + } + return nil +} + +func (x *RateLimit_Action) GetMaskedRemoteAddress() *RateLimit_Action_MaskedRemoteAddress { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_MaskedRemoteAddress_); ok { + return x.MaskedRemoteAddress + } + return nil +} + +func (x *RateLimit_Action) GetQueryParameterValueMatch() *RateLimit_Action_QueryParameterValueMatch { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_QueryParameterValueMatch_); ok { + return x.QueryParameterValueMatch + } + return nil +} + +type isRateLimit_Action_ActionSpecifier interface { + isRateLimit_Action_ActionSpecifier() +} + +type RateLimit_Action_SourceCluster_ struct { + // Rate limit on source cluster. + SourceCluster *RateLimit_Action_SourceCluster `protobuf:"bytes,1,opt,name=source_cluster,json=sourceCluster,proto3,oneof"` +} + +type RateLimit_Action_DestinationCluster_ struct { + // Rate limit on destination cluster. + DestinationCluster *RateLimit_Action_DestinationCluster `protobuf:"bytes,2,opt,name=destination_cluster,json=destinationCluster,proto3,oneof"` +} + +type RateLimit_Action_RequestHeaders_ struct { + // Rate limit on request headers. 
+ RequestHeaders *RateLimit_Action_RequestHeaders `protobuf:"bytes,3,opt,name=request_headers,json=requestHeaders,proto3,oneof"` +} + +type RateLimit_Action_RemoteAddress_ struct { + // Rate limit on remote address. + RemoteAddress *RateLimit_Action_RemoteAddress `protobuf:"bytes,4,opt,name=remote_address,json=remoteAddress,proto3,oneof"` +} + +type RateLimit_Action_GenericKey_ struct { + // Rate limit on a generic key. + GenericKey *RateLimit_Action_GenericKey `protobuf:"bytes,5,opt,name=generic_key,json=genericKey,proto3,oneof"` +} + +type RateLimit_Action_HeaderValueMatch_ struct { + // Rate limit on the existence of request headers. + HeaderValueMatch *RateLimit_Action_HeaderValueMatch `protobuf:"bytes,6,opt,name=header_value_match,json=headerValueMatch,proto3,oneof"` +} + +type RateLimit_Action_DynamicMetadata struct { + // Rate limit on dynamic metadata. + // + // .. attention:: + // + // This field has been deprecated in favor of the :ref:`metadata ` field + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + DynamicMetadata *RateLimit_Action_DynamicMetaData `protobuf:"bytes,7,opt,name=dynamic_metadata,json=dynamicMetadata,proto3,oneof"` +} + +type RateLimit_Action_Metadata struct { + // Rate limit on metadata. + Metadata *RateLimit_Action_MetaData `protobuf:"bytes,8,opt,name=metadata,proto3,oneof"` +} + +type RateLimit_Action_Extension struct { + // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + // + // :ref:`HTTP matching input functions ` are + // permitted as descriptor extensions. The input functions are only + // looked up if there is no rate limit descriptor extension matching + // the type URL. + // + // [#extension-category: envoy.rate_limit_descriptors] + Extension *v31.TypedExtensionConfig `protobuf:"bytes,9,opt,name=extension,proto3,oneof"` +} + +type RateLimit_Action_MaskedRemoteAddress_ struct { + // Rate limit on masked remote address. + MaskedRemoteAddress *RateLimit_Action_MaskedRemoteAddress `protobuf:"bytes,10,opt,name=masked_remote_address,json=maskedRemoteAddress,proto3,oneof"` +} + +type RateLimit_Action_QueryParameterValueMatch_ struct { + // Rate limit on the existence of query parameters. 
+ QueryParameterValueMatch *RateLimit_Action_QueryParameterValueMatch `protobuf:"bytes,11,opt,name=query_parameter_value_match,json=queryParameterValueMatch,proto3,oneof"` +} + +func (*RateLimit_Action_SourceCluster_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_DestinationCluster_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_RequestHeaders_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_RemoteAddress_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_GenericKey_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_HeaderValueMatch_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_DynamicMetadata) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_Metadata) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_Extension) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_MaskedRemoteAddress_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_QueryParameterValueMatch_) isRateLimit_Action_ActionSpecifier() {} + +type RateLimit_Override struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OverrideSpecifier: + // + // *RateLimit_Override_DynamicMetadata_ + OverrideSpecifier isRateLimit_Override_OverrideSpecifier `protobuf_oneof:"override_specifier"` +} + +func (x *RateLimit_Override) Reset() { + *x = RateLimit_Override{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Override) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Override) ProtoMessage() {} + +func (x *RateLimit_Override) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Override.ProtoReflect.Descriptor instead. +func (*RateLimit_Override) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1} +} + +func (m *RateLimit_Override) GetOverrideSpecifier() isRateLimit_Override_OverrideSpecifier { + if m != nil { + return m.OverrideSpecifier + } + return nil +} + +func (x *RateLimit_Override) GetDynamicMetadata() *RateLimit_Override_DynamicMetadata { + if x, ok := x.GetOverrideSpecifier().(*RateLimit_Override_DynamicMetadata_); ok { + return x.DynamicMetadata + } + return nil +} + +type isRateLimit_Override_OverrideSpecifier interface { + isRateLimit_Override_OverrideSpecifier() +} + +type RateLimit_Override_DynamicMetadata_ struct { + // Limit override from dynamic metadata. + DynamicMetadata *RateLimit_Override_DynamicMetadata `protobuf:"bytes,1,opt,name=dynamic_metadata,json=dynamicMetadata,proto3,oneof"` +} + +func (*RateLimit_Override_DynamicMetadata_) isRateLimit_Override_OverrideSpecifier() {} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("source_cluster", "") +// +// is derived from the :option:`--service-cluster` option. 
+type RateLimit_Action_SourceCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_SourceCluster) Reset() { + *x = RateLimit_Action_SourceCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_SourceCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_SourceCluster) ProtoMessage() {} + +func (x *RateLimit_Action_SourceCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_SourceCluster.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_SourceCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 0} +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("destination_cluster", "") +// +// Once a request matches against a route table rule, a routed cluster is determined by one of +// the following :ref:`route table configuration ` +// settings: +// +// - :ref:`cluster ` indicates the upstream cluster +// to route to. +// - :ref:`weighted_clusters ` +// chooses a cluster randomly from a set of clusters with attributed weight. +// - :ref:`cluster_header ` indicates which +// header in the request contains the target cluster. +type RateLimit_Action_DestinationCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_DestinationCluster) Reset() { + *x = RateLimit_Action_DestinationCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_DestinationCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_DestinationCluster) ProtoMessage() {} + +func (x *RateLimit_Action_DestinationCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_DestinationCluster.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_DestinationCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 1} +} + +// The following descriptor entry is appended when a header contains a key that matches the +// “header_name“: +// +// .. code-block:: cpp +// +// ("", "") +type RateLimit_Action_RequestHeaders struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. 
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,2,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + SkipIfAbsent bool `protobuf:"varint,3,opt,name=skip_if_absent,json=skipIfAbsent,proto3" json:"skip_if_absent,omitempty"` +} + +func (x *RateLimit_Action_RequestHeaders) Reset() { + *x = RateLimit_Action_RequestHeaders{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_RequestHeaders) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_RequestHeaders) ProtoMessage() {} + +func (x *RateLimit_Action_RequestHeaders) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_RequestHeaders.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_RequestHeaders) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 2} +} + +func (x *RateLimit_Action_RequestHeaders) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +func (x *RateLimit_Action_RequestHeaders) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_RequestHeaders) GetSkipIfAbsent() bool { + if x != nil { + return x.SkipIfAbsent + } + return false +} + +// The following descriptor entry is appended to the descriptor and is populated using the +// trusted address from :ref:`x-forwarded-for `: +// +// .. code-block:: cpp +// +// ("remote_address", "") +type RateLimit_Action_RemoteAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_RemoteAddress) Reset() { + *x = RateLimit_Action_RemoteAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_RemoteAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_RemoteAddress) ProtoMessage() {} + +func (x *RateLimit_Action_RemoteAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_RemoteAddress.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Action_RemoteAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 3} +} + +// The following descriptor entry is appended to the descriptor and is populated using the +// masked address from :ref:`x-forwarded-for `: +// +// .. code-block:: cpp +// +// ("masked_remote_address", "") +type RateLimit_Action_MaskedRemoteAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of prefix mask len for IPv4 (e.g. 0, 32). + // Defaults to 32 when unset. + // For example, trusted address from x-forwarded-for is “192.168.1.1“, + // the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); + // if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). + V4PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=v4_prefix_mask_len,json=v4PrefixMaskLen,proto3" json:"v4_prefix_mask_len,omitempty"` + // Length of prefix mask len for IPv6 (e.g. 0, 128). + // Defaults to 128 when unset. + // For example, trusted address from x-forwarded-for is “2001:abcd:ef01:2345:6789:abcd:ef01:234“, + // the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); + // if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). + V6PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=v6_prefix_mask_len,json=v6PrefixMaskLen,proto3" json:"v6_prefix_mask_len,omitempty"` +} + +func (x *RateLimit_Action_MaskedRemoteAddress) Reset() { + *x = RateLimit_Action_MaskedRemoteAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_MaskedRemoteAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_MaskedRemoteAddress) ProtoMessage() {} + +func (x *RateLimit_Action_MaskedRemoteAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_MaskedRemoteAddress.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_MaskedRemoteAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 4} +} + +func (x *RateLimit_Action_MaskedRemoteAddress) GetV4PrefixMaskLen() *wrapperspb.UInt32Value { + if x != nil { + return x.V4PrefixMaskLen + } + return nil +} + +func (x *RateLimit_Action_MaskedRemoteAddress) GetV6PrefixMaskLen() *wrapperspb.UInt32Value { + if x != nil { + return x.V6PrefixMaskLen + } + return nil +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("generic_key", "") +type RateLimit_Action_GenericKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // An optional key to use in the descriptor entry. 
If not set it defaults + // to 'generic_key' as the descriptor key. + DescriptorKey string `protobuf:"bytes,2,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` +} + +func (x *RateLimit_Action_GenericKey) Reset() { + *x = RateLimit_Action_GenericKey{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_GenericKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_GenericKey) ProtoMessage() {} + +func (x *RateLimit_Action_GenericKey) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_GenericKey.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_GenericKey) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 5} +} + +func (x *RateLimit_Action_GenericKey) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_GenericKey) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("header_match", "") +type RateLimit_Action_HeaderValueMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. Defaults to “header_match“. + DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). 
+ Headers []*HeaderMatcher `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *RateLimit_Action_HeaderValueMatch) Reset() { + *x = RateLimit_Action_HeaderValueMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_HeaderValueMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_HeaderValueMatch) ProtoMessage() {} + +func (x *RateLimit_Action_HeaderValueMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_HeaderValueMatch.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_HeaderValueMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 6} +} + +func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_HeaderValueMatch) GetExpectMatch() *wrapperspb.BoolValue { + if x != nil { + return x.ExpectMatch + } + return nil +} + +func (x *RateLimit_Action_HeaderValueMatch) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// The following descriptor entry is appended when the +// :ref:`dynamic metadata ` contains a key value: +// +// .. code-block:: cpp +// +// ("", "") +// +// .. attention:: +// +// This action has been deprecated in favor of the :ref:`metadata ` action +type RateLimit_Action_DynamicMetaData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,1,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // An optional value to use if “metadata_key“ is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. 
+ DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *RateLimit_Action_DynamicMetaData) Reset() { + *x = RateLimit_Action_DynamicMetaData{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_DynamicMetaData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_DynamicMetaData) ProtoMessage() {} + +func (x *RateLimit_Action_DynamicMetaData) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_DynamicMetaData.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_DynamicMetaData) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 7} +} + +func (x *RateLimit_Action_DynamicMetaData) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_DynamicMetaData) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *RateLimit_Action_DynamicMetaData) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// The following descriptor entry is appended when the metadata contains a key value: +// +// .. code-block:: cpp +// +// ("", "") +// +// [#next-free-field: 6] +type RateLimit_Action_MetaData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,1,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // An optional value to use if “metadata_key“ is empty. If not set and + // no value is present under the metadata_key then “skip_if_absent“ is followed to + // skip calling the rate limiting service or skip the descriptor. + DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Source of metadata + Source RateLimit_Action_MetaData_Source `protobuf:"varint,4,opt,name=source,proto3,enum=envoy.config.route.v3.RateLimit_Action_MetaData_Source" json:"source,omitempty"` + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when “metadata_key“ is empty and “default_value“ is not set. By default it skips calling the + // rate limiting service in that case. 
+ SkipIfAbsent bool `protobuf:"varint,5,opt,name=skip_if_absent,json=skipIfAbsent,proto3" json:"skip_if_absent,omitempty"` +} + +func (x *RateLimit_Action_MetaData) Reset() { + *x = RateLimit_Action_MetaData{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_MetaData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_MetaData) ProtoMessage() {} + +func (x *RateLimit_Action_MetaData) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_MetaData.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_MetaData) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8} +} + +func (x *RateLimit_Action_MetaData) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_MetaData) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *RateLimit_Action_MetaData) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +func (x *RateLimit_Action_MetaData) GetSource() RateLimit_Action_MetaData_Source { + if x != nil { + return x.Source + } + return RateLimit_Action_MetaData_DYNAMIC +} + +func (x *RateLimit_Action_MetaData) GetSkipIfAbsent() bool { + if x != nil { + return x.SkipIfAbsent + } + return false +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("query_match", "") +type RateLimit_Action_QueryParameterValueMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. Defaults to “query_match“. + DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + // Specifies a set of query parameters that the rate limit action should match + // on. The action will check the request’s query parameters against all the + // specified query parameters in the config. A match will happen if all the + // query parameters in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). 
+ QueryParameters []*QueryParameterMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` +} + +func (x *RateLimit_Action_QueryParameterValueMatch) Reset() { + *x = RateLimit_Action_QueryParameterValueMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_QueryParameterValueMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_QueryParameterValueMatch) ProtoMessage() {} + +func (x *RateLimit_Action_QueryParameterValueMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_QueryParameterValueMatch.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_QueryParameterValueMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 9} +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetExpectMatch() *wrapperspb.BoolValue { + if x != nil { + return x.ExpectMatch + } + return nil +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetQueryParameters() []*QueryParameterMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +// Fetches the override from the dynamic metadata. +type RateLimit_Override_DynamicMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + MetadataKey *v35.MetadataKey `protobuf:"bytes,1,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` +} + +func (x *RateLimit_Override_DynamicMetadata) Reset() { + *x = RateLimit_Override_DynamicMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Override_DynamicMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Override_DynamicMetadata) ProtoMessage() {} + +func (x *RateLimit_Override_DynamicMetadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Override_DynamicMetadata.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Override_DynamicMetadata) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1, 0} +} + +func (x *RateLimit_Override_DynamicMetadata) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +var File_envoy_config_route_v3_route_components_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x0f, 0x0a, 0x0b, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, + 0x08, 0x01, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, + 0x06, 0x02, 0x08, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, + 0x0b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x2e, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x54, 0x6c, 0x73, 0x12, + 0x50, 0x0a, 0x10, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, + 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, + 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, + 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x73, 0x0a, 0x17, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 
0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, + 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, + 0x1f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, + 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, + 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x52, + 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0x0a, 0x09, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0xaf, 0x0b, 0x0a, 0x05, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x05, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a, + 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x13, + 0x6e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 
0x64, 0x61, 0x74, 0x61, 0x12, + 0x3e, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x6d, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, + 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, + 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, + 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, + 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, + 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, + 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, + 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1f, + 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x42, + 0x0d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf3, 0x0a, + 0x0a, 0x0f, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, + 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, 0x0a, 0x0d, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x45, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, + 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, + 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x14, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x00, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x5d, 0x0a, + 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x37, 0x9a, 0xc5, + 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x72, 0x61, 0x6e, + 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x16, 0x43, 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x52, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, + 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x15, + 0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, 0x5d, 0x2b, 0x5b, 0x5e, + 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x53, 0x65, 0x70, + 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x58, 0x0a, 0x11, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x73, + 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x61, 0x73, 0x65, + 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, + 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, 0x12, 0x59, 0x0a, 0x0b, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, 0x0a, 0x15, 0x47, 0x72, 0x70, 0x63, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc9, 0x01, + 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, + 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xa8, 0x06, 0x0a, 0x0a, + 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 
0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x5b, + 0x0a, 0x1c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x61, 0x0a, 0x1f, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1c, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x3a, 0x24, + 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa6, 0x2d, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 
0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, + 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, + 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 
0x78, + 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, + 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, + 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, + 0x48, 0x01, 0x52, 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, + 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, + 0x64, 0x48, 0x6f, 
0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, + 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, + 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcb, 0x03, 0x0a, 0x13, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 
0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, + 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, + 0x48, 0x0a, 0x21, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, + 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, + 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, + 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 
0x01, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, + 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, + 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, + 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, + 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, + 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, + 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, + 0x02, 0x18, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, + 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, + 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x19, 0x0a, 0x08, 
0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, + 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, + 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, + 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 
0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 
0x79, 0x70, 0x65, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, + 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, + 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, + 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, + 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 
0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x68, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x02, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, + 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 
0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, + 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, + 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, + 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 
0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, + 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 
0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, + 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, + 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, + 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, + 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x53, + 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, + 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, + 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, + 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 
0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x76, 0x34, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, 0x52, 0x0f, 0x76, + 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x12, 0x53, + 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, + 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, + 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x3a, 0x35, 0x9a, + 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, + 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 
0x61, + 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x3b, 0x9a, + 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, + 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, + 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, + 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x49, 
0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x66, + 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 
0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x12, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, + 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3c, + 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 
0x67, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, + 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 
0x6c, + 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x18, 0x01, + 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x70, + 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_route_components_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_route_components_proto_rawDescData = file_envoy_config_route_v3_route_components_proto_rawDesc +) + +func file_envoy_config_route_v3_route_components_proto_rawDescGZIP() []byte { + file_envoy_config_route_v3_route_components_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_route_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_route_components_proto_rawDescData) + }) + return file_envoy_config_route_v3_route_components_proto_rawDescData +} + +var file_envoy_config_route_v3_route_components_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_envoy_config_route_v3_route_components_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_envoy_config_route_v3_route_components_proto_goTypes = []interface{}{ + (VirtualHost_TlsRequirementType)(0), // 0: envoy.config.route.v3.VirtualHost.TlsRequirementType + (RouteAction_ClusterNotFoundResponseCode)(0), // 1: envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + (RouteAction_InternalRedirectAction)(0), // 2: envoy.config.route.v3.RouteAction.InternalRedirectAction + (RetryPolicy_ResetHeaderFormat)(0), // 3: envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + (RedirectAction_RedirectResponseCode)(0), // 4: envoy.config.route.v3.RedirectAction.RedirectResponseCode + (RateLimit_Action_MetaData_Source)(0), // 5: envoy.config.route.v3.RateLimit.Action.MetaData.Source + (*VirtualHost)(nil), // 6: envoy.config.route.v3.VirtualHost + (*FilterAction)(nil), // 7: envoy.config.route.v3.FilterAction + (*RouteList)(nil), // 8: envoy.config.route.v3.RouteList + (*Route)(nil), // 9: envoy.config.route.v3.Route + (*WeightedCluster)(nil), // 10: envoy.config.route.v3.WeightedCluster + (*ClusterSpecifierPlugin)(nil), // 11: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteMatch)(nil), // 12: envoy.config.route.v3.RouteMatch + (*CorsPolicy)(nil), // 13: envoy.config.route.v3.CorsPolicy + (*RouteAction)(nil), // 14: envoy.config.route.v3.RouteAction + (*RetryPolicy)(nil), // 15: envoy.config.route.v3.RetryPolicy + (*HedgePolicy)(nil), // 16: envoy.config.route.v3.HedgePolicy + (*RedirectAction)(nil), // 17: envoy.config.route.v3.RedirectAction + (*DirectResponseAction)(nil), // 18: envoy.config.route.v3.DirectResponseAction + (*NonForwardingAction)(nil), // 19: envoy.config.route.v3.NonForwardingAction + (*Decorator)(nil), // 20: envoy.config.route.v3.Decorator + (*Tracing)(nil), // 21: envoy.config.route.v3.Tracing + (*VirtualCluster)(nil), // 22: envoy.config.route.v3.VirtualCluster + (*RateLimit)(nil), // 23: envoy.config.route.v3.RateLimit + (*HeaderMatcher)(nil), // 24: envoy.config.route.v3.HeaderMatcher + (*QueryParameterMatcher)(nil), // 25: envoy.config.route.v3.QueryParameterMatcher + (*InternalRedirectPolicy)(nil), // 26: envoy.config.route.v3.InternalRedirectPolicy + (*FilterConfig)(nil), // 27: envoy.config.route.v3.FilterConfig + nil, // 28: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + nil, // 29: envoy.config.route.v3.Route.TypedPerFilterConfigEntry + 
(*WeightedCluster_ClusterWeight)(nil), // 30: envoy.config.route.v3.WeightedCluster.ClusterWeight + nil, // 31: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + (*RouteMatch_GrpcRouteMatchOptions)(nil), // 32: envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + (*RouteMatch_TlsContextMatchOptions)(nil), // 33: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + (*RouteMatch_ConnectMatcher)(nil), // 34: envoy.config.route.v3.RouteMatch.ConnectMatcher + (*RouteAction_RequestMirrorPolicy)(nil), // 35: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*RouteAction_HashPolicy)(nil), // 36: envoy.config.route.v3.RouteAction.HashPolicy + (*RouteAction_UpgradeConfig)(nil), // 37: envoy.config.route.v3.RouteAction.UpgradeConfig + (*RouteAction_MaxStreamDuration)(nil), // 38: envoy.config.route.v3.RouteAction.MaxStreamDuration + (*RouteAction_HashPolicy_Header)(nil), // 39: envoy.config.route.v3.RouteAction.HashPolicy.Header + (*RouteAction_HashPolicy_CookieAttribute)(nil), // 40: envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute + (*RouteAction_HashPolicy_Cookie)(nil), // 41: envoy.config.route.v3.RouteAction.HashPolicy.Cookie + (*RouteAction_HashPolicy_ConnectionProperties)(nil), // 42: envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + (*RouteAction_HashPolicy_QueryParameter)(nil), // 43: envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + (*RouteAction_HashPolicy_FilterState)(nil), // 44: envoy.config.route.v3.RouteAction.HashPolicy.FilterState + (*RouteAction_UpgradeConfig_ConnectConfig)(nil), // 45: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + (*RetryPolicy_RetryPriority)(nil), // 46: envoy.config.route.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 47: envoy.config.route.v3.RetryPolicy.RetryHostPredicate + (*RetryPolicy_RetryBackOff)(nil), // 48: envoy.config.route.v3.RetryPolicy.RetryBackOff + (*RetryPolicy_ResetHeader)(nil), // 49: envoy.config.route.v3.RetryPolicy.ResetHeader + (*RetryPolicy_RateLimitedRetryBackOff)(nil), // 50: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + (*RateLimit_Action)(nil), // 51: envoy.config.route.v3.RateLimit.Action + (*RateLimit_Override)(nil), // 52: envoy.config.route.v3.RateLimit.Override + (*RateLimit_Action_SourceCluster)(nil), // 53: envoy.config.route.v3.RateLimit.Action.SourceCluster + (*RateLimit_Action_DestinationCluster)(nil), // 54: envoy.config.route.v3.RateLimit.Action.DestinationCluster + (*RateLimit_Action_RequestHeaders)(nil), // 55: envoy.config.route.v3.RateLimit.Action.RequestHeaders + (*RateLimit_Action_RemoteAddress)(nil), // 56: envoy.config.route.v3.RateLimit.Action.RemoteAddress + (*RateLimit_Action_MaskedRemoteAddress)(nil), // 57: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + (*RateLimit_Action_GenericKey)(nil), // 58: envoy.config.route.v3.RateLimit.Action.GenericKey + (*RateLimit_Action_HeaderValueMatch)(nil), // 59: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + (*RateLimit_Action_DynamicMetaData)(nil), // 60: envoy.config.route.v3.RateLimit.Action.DynamicMetaData + (*RateLimit_Action_MetaData)(nil), // 61: envoy.config.route.v3.RateLimit.Action.MetaData + (*RateLimit_Action_QueryParameterValueMatch)(nil), // 62: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + (*RateLimit_Override_DynamicMetadata)(nil), // 63: envoy.config.route.v3.RateLimit.Override.DynamicMetadata + (*v3.Matcher)(nil), // 64: xds.type.matcher.v3.Matcher + 
(*v31.HeaderValueOption)(nil), // 65: envoy.config.core.v3.HeaderValueOption + (*anypb.Any)(nil), // 66: google.protobuf.Any + (*wrapperspb.UInt32Value)(nil), // 67: google.protobuf.UInt32Value + (*v31.Metadata)(nil), // 68: envoy.config.core.v3.Metadata + (*v31.TypedExtensionConfig)(nil), // 69: envoy.config.core.v3.TypedExtensionConfig + (*v32.RegexMatcher)(nil), // 70: envoy.type.matcher.v3.RegexMatcher + (*wrapperspb.BoolValue)(nil), // 71: google.protobuf.BoolValue + (*v31.RuntimeFractionalPercent)(nil), // 72: envoy.config.core.v3.RuntimeFractionalPercent + (*v32.MetadataMatcher)(nil), // 73: envoy.type.matcher.v3.MetadataMatcher + (*v32.StringMatcher)(nil), // 74: envoy.type.matcher.v3.StringMatcher + (*v32.RegexMatchAndSubstitute)(nil), // 75: envoy.type.matcher.v3.RegexMatchAndSubstitute + (*durationpb.Duration)(nil), // 76: google.protobuf.Duration + (v31.RoutingPriority)(0), // 77: envoy.config.core.v3.RoutingPriority + (*v33.FractionalPercent)(nil), // 78: envoy.type.v3.FractionalPercent + (*v31.DataSource)(nil), // 79: envoy.config.core.v3.DataSource + (*v34.CustomTag)(nil), // 80: envoy.type.tracing.v3.CustomTag + (*v33.Int64Range)(nil), // 81: envoy.type.v3.Int64Range + (*v31.ProxyProtocolConfig)(nil), // 82: envoy.config.core.v3.ProxyProtocolConfig + (*v35.MetadataKey)(nil), // 83: envoy.type.metadata.v3.MetadataKey +} +var file_envoy_config_route_v3_route_components_proto_depIdxs = []int32{ + 9, // 0: envoy.config.route.v3.VirtualHost.routes:type_name -> envoy.config.route.v3.Route + 64, // 1: envoy.config.route.v3.VirtualHost.matcher:type_name -> xds.type.matcher.v3.Matcher + 0, // 2: envoy.config.route.v3.VirtualHost.require_tls:type_name -> envoy.config.route.v3.VirtualHost.TlsRequirementType + 22, // 3: envoy.config.route.v3.VirtualHost.virtual_clusters:type_name -> envoy.config.route.v3.VirtualCluster + 23, // 4: envoy.config.route.v3.VirtualHost.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 65, // 5: envoy.config.route.v3.VirtualHost.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 6: envoy.config.route.v3.VirtualHost.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 13, // 7: envoy.config.route.v3.VirtualHost.cors:type_name -> envoy.config.route.v3.CorsPolicy + 28, // 8: envoy.config.route.v3.VirtualHost.typed_per_filter_config:type_name -> envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + 15, // 9: envoy.config.route.v3.VirtualHost.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 66, // 10: envoy.config.route.v3.VirtualHost.retry_policy_typed_config:type_name -> google.protobuf.Any + 16, // 11: envoy.config.route.v3.VirtualHost.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 67, // 12: envoy.config.route.v3.VirtualHost.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 35, // 13: envoy.config.route.v3.VirtualHost.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 68, // 14: envoy.config.route.v3.VirtualHost.metadata:type_name -> envoy.config.core.v3.Metadata + 66, // 15: envoy.config.route.v3.FilterAction.action:type_name -> google.protobuf.Any + 9, // 16: envoy.config.route.v3.RouteList.routes:type_name -> envoy.config.route.v3.Route + 12, // 17: envoy.config.route.v3.Route.match:type_name -> envoy.config.route.v3.RouteMatch + 14, // 18: envoy.config.route.v3.Route.route:type_name -> envoy.config.route.v3.RouteAction + 17, // 19: envoy.config.route.v3.Route.redirect:type_name -> 
envoy.config.route.v3.RedirectAction + 18, // 20: envoy.config.route.v3.Route.direct_response:type_name -> envoy.config.route.v3.DirectResponseAction + 7, // 21: envoy.config.route.v3.Route.filter_action:type_name -> envoy.config.route.v3.FilterAction + 19, // 22: envoy.config.route.v3.Route.non_forwarding_action:type_name -> envoy.config.route.v3.NonForwardingAction + 68, // 23: envoy.config.route.v3.Route.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 24: envoy.config.route.v3.Route.decorator:type_name -> envoy.config.route.v3.Decorator + 29, // 25: envoy.config.route.v3.Route.typed_per_filter_config:type_name -> envoy.config.route.v3.Route.TypedPerFilterConfigEntry + 65, // 26: envoy.config.route.v3.Route.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 27: envoy.config.route.v3.Route.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 21, // 28: envoy.config.route.v3.Route.tracing:type_name -> envoy.config.route.v3.Tracing + 67, // 29: envoy.config.route.v3.Route.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 30, // 30: envoy.config.route.v3.WeightedCluster.clusters:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight + 67, // 31: envoy.config.route.v3.WeightedCluster.total_weight:type_name -> google.protobuf.UInt32Value + 69, // 32: envoy.config.route.v3.ClusterSpecifierPlugin.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 70, // 33: envoy.config.route.v3.RouteMatch.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher + 34, // 34: envoy.config.route.v3.RouteMatch.connect_matcher:type_name -> envoy.config.route.v3.RouteMatch.ConnectMatcher + 69, // 35: envoy.config.route.v3.RouteMatch.path_match_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 71, // 36: envoy.config.route.v3.RouteMatch.case_sensitive:type_name -> google.protobuf.BoolValue + 72, // 37: envoy.config.route.v3.RouteMatch.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 24, // 38: envoy.config.route.v3.RouteMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 25, // 39: envoy.config.route.v3.RouteMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 32, // 40: envoy.config.route.v3.RouteMatch.grpc:type_name -> envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + 33, // 41: envoy.config.route.v3.RouteMatch.tls_context:type_name -> envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + 73, // 42: envoy.config.route.v3.RouteMatch.dynamic_metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 74, // 43: envoy.config.route.v3.CorsPolicy.allow_origin_string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 71, // 44: envoy.config.route.v3.CorsPolicy.allow_credentials:type_name -> google.protobuf.BoolValue + 72, // 45: envoy.config.route.v3.CorsPolicy.filter_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 72, // 46: envoy.config.route.v3.CorsPolicy.shadow_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 47: envoy.config.route.v3.CorsPolicy.allow_private_network_access:type_name -> google.protobuf.BoolValue + 71, // 48: envoy.config.route.v3.CorsPolicy.forward_not_matching_preflights:type_name -> google.protobuf.BoolValue + 10, // 49: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster + 11, // 50: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> 
envoy.config.route.v3.ClusterSpecifierPlugin + 1, // 51: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + 68, // 52: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata + 75, // 53: envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 69, // 54: envoy.config.route.v3.RouteAction.path_rewrite_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 71, // 55: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue + 75, // 56: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 57: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration + 76, // 58: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration + 69, // 59: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 60: envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 66, // 61: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any + 35, // 62: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 77, // 63: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority + 23, // 64: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 71, // 65: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue + 36, // 66: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy + 13, // 67: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy + 76, // 68: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration + 76, // 69: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration + 37, // 70: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig + 26, // 71: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy + 2, // 72: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction + 67, // 73: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 16, // 74: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 38, // 75: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration + 67, // 76: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 76, // 77: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> google.protobuf.Duration + 76, // 78: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration + 46, // 79: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority + 47, // 80: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate + 69, // 81: 
envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 48, // 82: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff + 50, // 83: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + 24, // 84: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 24, // 85: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 86: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value + 78, // 87: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> envoy.type.v3.FractionalPercent + 75, // 88: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 4, // 89: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode + 79, // 90: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource + 71, // 91: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue + 78, // 92: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 93: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 94: envoy.config.route.v3.Tracing.overall_sampling:type_name -> envoy.type.v3.FractionalPercent + 80, // 95: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 24, // 96: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 97: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value + 51, // 98: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action + 52, // 99: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override + 70, // 100: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher + 81, // 101: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range + 74, // 102: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 74, // 103: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 67, // 104: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 69, // 105: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 66, // 106: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any + 66, // 107: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 66, // 108: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 67, // 109: envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value + 68, // 110: envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata + 65, // 111: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 112: 
envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 31, // 113: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + 66, // 114: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 71, // 115: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue + 71, // 116: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue + 72, // 117: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 118: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue + 39, // 119: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header + 41, // 120: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie + 42, // 121: envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + 43, // 122: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + 44, // 123: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState + 71, // 124: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 45, // 125: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + 76, // 126: envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration + 76, // 127: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration + 76, // 128: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration + 75, // 129: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 130: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration + 40, // 131: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.attributes:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute + 82, // 132: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 66, // 133: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 66, // 134: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 76, // 135: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> google.protobuf.Duration + 76, // 136: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> google.protobuf.Duration + 3, // 137: envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + 49, // 138: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> 
envoy.config.route.v3.RetryPolicy.ResetHeader + 76, // 139: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration + 53, // 140: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster + 54, // 141: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster + 55, // 142: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> envoy.config.route.v3.RateLimit.Action.RequestHeaders + 56, // 143: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress + 58, // 144: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey + 59, // 145: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + 60, // 146: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData + 61, // 147: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData + 69, // 148: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 57, // 149: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + 62, // 150: envoy.config.route.v3.RateLimit.Action.query_parameter_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + 63, // 151: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Override.DynamicMetadata + 67, // 152: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 67, // 153: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 71, // 154: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 24, // 155: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 83, // 156: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 83, // 157: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 5, // 158: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData.Source + 71, // 159: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 25, // 160: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 83, // 161: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 162, // [162:162] is the sub-list for method output_type + 162, // [162:162] is the sub-list for method input_type + 162, // [162:162] is the sub-list for extension type_name + 162, // [162:162] is the sub-list for extension extendee + 0, // [0:162] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_route_components_proto_init() } +func 
file_envoy_config_route_v3_route_components_proto_init() { + if File_envoy_config_route_v3_route_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_route_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualHost); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Route); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WeightedCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterSpecifierPlugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CorsPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HedgePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RedirectAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_route_v3_route_components_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DirectResponseAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonForwardingAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decorator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameterMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InternalRedirectPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WeightedCluster_ClusterWeight); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch_GrpcRouteMatchOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[27].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*RouteMatch_TlsContextMatchOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch_ConnectMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_RequestMirrorPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_UpgradeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_MaxStreamDuration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_CookieAttribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_Cookie); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_ConnectionProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_QueryParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_FilterState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_route_v3_route_components_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_UpgradeConfig_ConnectConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryPriority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryHostPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryBackOff); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_ResetHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RateLimitedRetryBackOff); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Override); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_SourceCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_DestinationCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_RequestHeaders); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_RemoteAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_MaskedRemoteAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_GenericKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_HeaderValueMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_DynamicMetaData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_MetaData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_QueryParameterValueMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Override_DynamicMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Route_Route)(nil), + (*Route_Redirect)(nil), + (*Route_DirectResponse)(nil), + (*Route_FilterAction)(nil), + (*Route_NonForwardingAction)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*WeightedCluster_HeaderName)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*RouteMatch_Prefix)(nil), + (*RouteMatch_Path)(nil), + (*RouteMatch_SafeRegex)(nil), + (*RouteMatch_ConnectMatcher_)(nil), + (*RouteMatch_PathSeparatedPrefix)(nil), + (*RouteMatch_PathMatchPolicy)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*CorsPolicy_FilterEnabled)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*RouteAction_Cluster)(nil), + (*RouteAction_ClusterHeader)(nil), + (*RouteAction_WeightedClusters)(nil), + (*RouteAction_ClusterSpecifierPlugin)(nil), + (*RouteAction_InlineClusterSpecifierPlugin)(nil), + (*RouteAction_HostRewriteLiteral)(nil), + (*RouteAction_AutoHostRewrite)(nil), + (*RouteAction_HostRewriteHeader)(nil), + (*RouteAction_HostRewritePathRegex)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[11].OneofWrappers = []interface{}{ + 
(*RedirectAction_HttpsRedirect)(nil), + (*RedirectAction_SchemeRedirect)(nil), + (*RedirectAction_PathRedirect)(nil), + (*RedirectAction_PrefixRewrite)(nil), + (*RedirectAction_RegexRewrite)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[18].OneofWrappers = []interface{}{ + (*HeaderMatcher_ExactMatch)(nil), + (*HeaderMatcher_SafeRegexMatch)(nil), + (*HeaderMatcher_RangeMatch)(nil), + (*HeaderMatcher_PresentMatch)(nil), + (*HeaderMatcher_PrefixMatch)(nil), + (*HeaderMatcher_SuffixMatch)(nil), + (*HeaderMatcher_ContainsMatch)(nil), + (*HeaderMatcher_StringMatch)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*QueryParameterMatcher_StringMatch)(nil), + (*QueryParameterMatcher_PresentMatch)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*WeightedCluster_ClusterWeight_HostRewriteLiteral)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*RouteAction_HashPolicy_Header_)(nil), + (*RouteAction_HashPolicy_Cookie_)(nil), + (*RouteAction_HashPolicy_ConnectionProperties_)(nil), + (*RouteAction_HashPolicy_QueryParameter_)(nil), + (*RouteAction_HashPolicy_FilterState_)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[40].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryPriority_TypedConfig)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[41].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[45].OneofWrappers = []interface{}{ + (*RateLimit_Action_SourceCluster_)(nil), + (*RateLimit_Action_DestinationCluster_)(nil), + (*RateLimit_Action_RequestHeaders_)(nil), + (*RateLimit_Action_RemoteAddress_)(nil), + (*RateLimit_Action_GenericKey_)(nil), + (*RateLimit_Action_HeaderValueMatch_)(nil), + (*RateLimit_Action_DynamicMetadata)(nil), + (*RateLimit_Action_Metadata)(nil), + (*RateLimit_Action_Extension)(nil), + (*RateLimit_Action_MaskedRemoteAddress_)(nil), + (*RateLimit_Action_QueryParameterValueMatch_)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[46].OneofWrappers = []interface{}{ + (*RateLimit_Override_DynamicMetadata_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_route_components_proto_rawDesc, + NumEnums: 6, + NumMessages: 58, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_route_components_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_route_components_proto_depIdxs, + EnumInfos: file_envoy_config_route_v3_route_components_proto_enumTypes, + MessageInfos: file_envoy_config_route_v3_route_components_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_route_components_proto = out.File + file_envoy_config_route_v3_route_components_proto_rawDesc = nil + file_envoy_config_route_v3_route_components_proto_goTypes = nil + file_envoy_config_route_v3_route_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go new file mode 100644 index 000000000..ffa6daf7b --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go @@ -0,0 +1,12236 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RoutingPriority(0) +) + +// Validate checks the field values on VirtualHost with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *VirtualHost) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VirtualHost with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in VirtualHostMultiError, or +// nil if none found. +func (m *VirtualHost) ValidateAll() error { + return m.validate(true) +} + +func (m *VirtualHost) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := VirtualHostValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetDomains()) < 1 { + err := VirtualHostValidationError{ + field: "Domains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDomains() { + _, _ = idx, item + + if !_VirtualHost_Domains_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("Domains[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetRoutes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) 
+ } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := VirtualHost_TlsRequirementType_name[int32(m.GetRequireTls())]; !ok { + err := VirtualHostValidationError{ + field: "RequireTls", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetVirtualClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRateLimits() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := VirtualHostValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_VirtualHost_RequestHeadersToRemove_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := VirtualHostValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_VirtualHost_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetCors()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCors()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range 
m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + // no validation rules for IncludeRequestAttemptCount + + // no validation rules for IncludeAttemptCountInResponse + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicyTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicyTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHedgePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHedgePolicy()).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IncludeIsTimeoutRetryHeader + + if all { + switch v := interface{}(m.GetPerRequestBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerRequestBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return VirtualHostMultiError(errors) + } + + return nil +} + +// VirtualHostMultiError is an error wrapping multiple validation errors +// returned by VirtualHost.ValidateAll() if the designated constraints aren't met. +type VirtualHostMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VirtualHostMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m VirtualHostMultiError) AllErrors() []error { return m } + +// VirtualHostValidationError is the validation error returned by +// VirtualHost.Validate if the designated constraints aren't met. +type VirtualHostValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VirtualHostValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VirtualHostValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VirtualHostValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VirtualHostValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VirtualHostValidationError) ErrorName() string { return "VirtualHostValidationError" } + +// Error satisfies the builtin error interface +func (e VirtualHostValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVirtualHost.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VirtualHostValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VirtualHostValidationError{} + +var _VirtualHost_Domains_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _VirtualHost_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _VirtualHost_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on FilterAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterActionMultiError, or +// nil if none found. +func (m *FilterAction) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterActionMultiError(errors) + } + + return nil +} + +// FilterActionMultiError is an error wrapping multiple validation errors +// returned by FilterAction.ValidateAll() if the designated constraints aren't met. +type FilterActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FilterActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterActionMultiError) AllErrors() []error { return m } + +// FilterActionValidationError is the validation error returned by +// FilterAction.Validate if the designated constraints aren't met. +type FilterActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterActionValidationError) ErrorName() string { return "FilterActionValidationError" } + +// Error satisfies the builtin error interface +func (e FilterActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterActionValidationError{} + +// Validate checks the field values on RouteList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteList with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteListMultiError, or nil +// if none found. +func (m *RouteList) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRoutes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouteListMultiError(errors) + } + + return nil +} + +// RouteListMultiError is an error wrapping multiple validation errors returned +// by RouteList.ValidateAll() if the designated constraints aren't met. 
+type RouteListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteListMultiError) AllErrors() []error { return m } + +// RouteListValidationError is the validation error returned by +// RouteList.Validate if the designated constraints aren't met. +type RouteListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteListValidationError) ErrorName() string { return "RouteListValidationError" } + +// Error satisfies the builtin error interface +func (e RouteListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteListValidationError{} + +// Validate checks the field values on Route with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Route) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Route with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RouteMultiError, or nil if none found. 
+func (m *Route) ValidateAll() error { + return m.validate(true) +} + +func (m *Route) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if m.GetMatch() == nil { + err := RouteValidationError{ + field: "Match", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDecorator()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDecorator()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := RouteValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_Route_RequestHeadersToRemove_Pattern.MatchString(item) { + err := RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := RouteValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value length must be at least 
1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_Route_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerRequestBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerRequestBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StatPrefix + + oneofActionPresent := false + switch v := m.Action.(type) { + case *Route_Route: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetRoute()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoute()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_Redirect: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetRedirect()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Redirect", + reason: "embedded message 
failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Redirect", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRedirect()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Redirect", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_DirectResponse: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetDirectResponse()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDirectResponse()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_FilterAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetFilterAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_NonForwardingAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetNonForwardingAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNonForwardingAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: 
"NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofActionPresent { + err := RouteValidationError{ + field: "Action", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteMultiError(errors) + } + + return nil +} + +// RouteMultiError is an error wrapping multiple validation errors returned by +// Route.ValidateAll() if the designated constraints aren't met. +type RouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMultiError) AllErrors() []error { return m } + +// RouteValidationError is the validation error returned by Route.Validate if +// the designated constraints aren't met. +type RouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteValidationError) ErrorName() string { return "RouteValidationError" } + +// Error satisfies the builtin error interface +func (e RouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteValidationError{} + +var _Route_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _Route_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on WeightedCluster with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WeightedCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WeightedCluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WeightedClusterMultiError, or nil if none found. 
+func (m *WeightedCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *WeightedCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetClusters()) < 1 { + err := WeightedClusterValidationError{ + field: "Clusters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTotalWeight()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTotalWeight()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RuntimeKeyPrefix + + switch v := m.RandomValueSpecifier.(type) { + case *WeightedCluster_HeaderName: + if v == nil { + err := WeightedClusterValidationError{ + field: "RandomValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_WeightedCluster_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := WeightedClusterValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return WeightedClusterMultiError(errors) + } + + return nil +} + +// WeightedClusterMultiError is an error wrapping multiple validation errors +// returned by WeightedCluster.ValidateAll() if the designated constraints +// aren't met. +type WeightedClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WeightedClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m WeightedClusterMultiError) AllErrors() []error { return m } + +// WeightedClusterValidationError is the validation error returned by +// WeightedCluster.Validate if the designated constraints aren't met. +type WeightedClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WeightedClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WeightedClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WeightedClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WeightedClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WeightedClusterValidationError) ErrorName() string { return "WeightedClusterValidationError" } + +// Error satisfies the builtin error interface +func (e WeightedClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWeightedCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WeightedClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WeightedClusterValidationError{} + +var _WeightedCluster_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on ClusterSpecifierPlugin with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterSpecifierPlugin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterSpecifierPlugin with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterSpecifierPluginMultiError, or nil if none found. 
+func (m *ClusterSpecifierPlugin) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterSpecifierPlugin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExtension() == nil { + err := ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + if len(errors) > 0 { + return ClusterSpecifierPluginMultiError(errors) + } + + return nil +} + +// ClusterSpecifierPluginMultiError is an error wrapping multiple validation +// errors returned by ClusterSpecifierPlugin.ValidateAll() if the designated +// constraints aren't met. +type ClusterSpecifierPluginMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterSpecifierPluginMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterSpecifierPluginMultiError) AllErrors() []error { return m } + +// ClusterSpecifierPluginValidationError is the validation error returned by +// ClusterSpecifierPlugin.Validate if the designated constraints aren't met. +type ClusterSpecifierPluginValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterSpecifierPluginValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterSpecifierPluginValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterSpecifierPluginValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterSpecifierPluginValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClusterSpecifierPluginValidationError) ErrorName() string { + return "ClusterSpecifierPluginValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterSpecifierPluginValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterSpecifierPlugin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterSpecifierPluginValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterSpecifierPluginValidationError{} + +// Validate checks the field values on RouteMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteMatchMultiError, or +// nil if none found. +func (m *RouteMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCaseSensitive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCaseSensitive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRuntimeFraction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFraction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: 
fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetQueryParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetDynamicMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); 
ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + oneofPathSpecifierPresent := false + switch v := m.PathSpecifier.(type) { + case *RouteMatch_Prefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + // no validation rules for Prefix + case *RouteMatch_Path: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + // no validation rules for Path + case *RouteMatch_SafeRegex: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if m.GetSafeRegex() == nil { + err := RouteMatchValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteMatch_ConnectMatcher_: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if all { + switch v := interface{}(m.GetConnectMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteMatch_PathSeparatedPrefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if !_RouteMatch_PathSeparatedPrefix_Pattern.MatchString(m.GetPathSeparatedPrefix()) { + err := 
RouteMatchValidationError{ + field: "PathSeparatedPrefix", + reason: "value does not match regex pattern \"^[^?#]+[^?#/]$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteMatch_PathMatchPolicy: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if all { + switch v := interface{}(m.GetPathMatchPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathMatchPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPathSpecifierPresent { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteMatchMultiError(errors) + } + + return nil +} + +// RouteMatchMultiError is an error wrapping multiple validation errors +// returned by RouteMatch.ValidateAll() if the designated constraints aren't met. +type RouteMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatchMultiError) AllErrors() []error { return m } + +// RouteMatchValidationError is the validation error returned by +// RouteMatch.Validate if the designated constraints aren't met. +type RouteMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteMatchValidationError) ErrorName() string { return "RouteMatchValidationError" } + +// Error satisfies the builtin error interface +func (e RouteMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatchValidationError{} + +var _RouteMatch_PathSeparatedPrefix_Pattern = regexp.MustCompile("^[^?#]+[^?#/]$") + +// Validate checks the field values on CorsPolicy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CorsPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CorsPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CorsPolicyMultiError, or +// nil if none found. +func (m *CorsPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *CorsPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAllowOriginStringMatch() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AllowMethods + + // no validation rules for AllowHeaders + + // no validation rules for ExposeHeaders + + // no validation rules for MaxAge + + if all { + switch v := interface{}(m.GetAllowCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = 
append(errors, CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAllowPrivateNetworkAccess()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowPrivateNetworkAccess()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetForwardNotMatchingPreflights()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetForwardNotMatchingPreflights()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.EnabledSpecifier.(type) { + case *CorsPolicy_FilterEnabled: + if v == nil { + err := CorsPolicyValidationError{ + field: "EnabledSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilterEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return 
CorsPolicyMultiError(errors) + } + + return nil +} + +// CorsPolicyMultiError is an error wrapping multiple validation errors +// returned by CorsPolicy.ValidateAll() if the designated constraints aren't met. +type CorsPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CorsPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CorsPolicyMultiError) AllErrors() []error { return m } + +// CorsPolicyValidationError is the validation error returned by +// CorsPolicy.Validate if the designated constraints aren't met. +type CorsPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CorsPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CorsPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CorsPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CorsPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CorsPolicyValidationError) ErrorName() string { return "CorsPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e CorsPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCorsPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CorsPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CorsPolicyValidationError{} + +// Validate checks the field values on RouteAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteActionMultiError, or +// nil if none found. 
+func (m *RouteAction) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RouteAction_ClusterNotFoundResponseCode_name[int32(m.GetClusterNotFoundResponseCode())]; !ok { + err := RouteActionValidationError{ + field: "ClusterNotFoundResponseCode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_RouteAction_PrefixRewrite_Pattern.MatchString(m.GetPrefixRewrite()) { + err := RouteActionValidationError{ + field: "PrefixRewrite", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPathRewritePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathRewritePolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AppendXForwardedHost + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEarlyDataPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEarlyDataPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicyTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicyTypedConfig()).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok { + err := RouteActionValidationError{ + field: "Priority", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRateLimits() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetIncludeVhRateLimits()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIncludeVhRateLimits()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHashPolicy() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := 
v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetCors()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCors()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxGrpcTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxGrpcTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutOffset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutOffset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetUpgradeConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInternalRedirectPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalRedirectPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for InternalRedirectAction + + if all { + switch v := interface{}(m.GetMaxInternalRedirects()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInternalRedirects()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHedgePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHedgePolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxStreamDuration", + 
reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofClusterSpecifierPresent := false + switch v := m.ClusterSpecifier.(type) { + case *RouteAction_Cluster: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if utf8.RuneCountInString(m.GetCluster()) < 1 { + err := RouteActionValidationError{ + field: "Cluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_ClusterHeader: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if utf8.RuneCountInString(m.GetClusterHeader()) < 1 { + err := RouteActionValidationError{ + field: "ClusterHeader", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := RouteActionValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_WeightedClusters: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetWeightedClusters()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightedClusters()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_ClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + // no validation rules for ClusterSpecifierPlugin + case *RouteAction_InlineClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetInlineClusterSpecifierPlugin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err 
!= nil { + errors = append(errors, RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInlineClusterSpecifierPlugin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofClusterSpecifierPresent { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + switch v := m.HostRewriteSpecifier.(type) { + case *RouteAction_HostRewriteLiteral: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { + err := RouteActionValidationError{ + field: "HostRewriteLiteral", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_AutoHostRewrite: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAutoHostRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAutoHostRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HostRewriteHeader: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HostRewriteHeader_Pattern.MatchString(m.GetHostRewriteHeader()) { + err := RouteActionValidationError{ + field: "HostRewriteHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_HostRewritePathRegex: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHostRewritePathRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHostRewritePathRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RouteActionMultiError(errors) + } + + return nil +} + +// RouteActionMultiError is an error wrapping multiple validation errors +// returned by RouteAction.ValidateAll() if the designated constraints aren't met. +type RouteActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteActionMultiError) AllErrors() []error { return m } + +// RouteActionValidationError is the validation error returned by +// RouteAction.Validate if the designated constraints aren't met. +type RouteActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteActionValidationError) ErrorName() string { return "RouteActionValidationError" } + +// Error satisfies the builtin error interface +func (e RouteActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteActionValidationError{} + +var _RouteAction_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_PrefixRewrite_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HostRewriteLiteral_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HostRewriteHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RetryPolicyMultiError, or +// nil if none found. 
+func (m *RetryPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RetryOn + + if all { + switch v := interface{}(m.GetNumRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerTryTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerTryTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerTryIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerTryIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPriority()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPriority()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetryHostPredicate() { + _, _ = idx, item + + if all { + switch v := 
interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetryOptionsPredicates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for HostSelectionRetryMaxAttempts + + if all { + switch v := interface{}(m.GetRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRateLimitedRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRateLimitedRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetriableHeaders() { + 
_, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetriableRequestHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RetryPolicyMultiError(errors) + } + + return nil +} + +// RetryPolicyMultiError is an error wrapping multiple validation errors +// returned by RetryPolicy.ValidateAll() if the designated constraints aren't met. +type RetryPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicyMultiError) AllErrors() []error { return m } + +// RetryPolicyValidationError is the validation error returned by +// RetryPolicy.Validate if the designated constraints aren't met. +type RetryPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e RetryPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicyValidationError{} + +// Validate checks the field values on HedgePolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HedgePolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HedgePolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HedgePolicyMultiError, or +// nil if none found. +func (m *HedgePolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *HedgePolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetInitialRequests(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := HedgePolicyValidationError{ + field: "InitialRequests", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetAdditionalRequestChance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdditionalRequestChance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HedgeOnPerTryTimeout + + if len(errors) > 0 { + return HedgePolicyMultiError(errors) + } + + return nil +} + +// HedgePolicyMultiError is an error wrapping multiple validation errors +// returned by HedgePolicy.ValidateAll() if the designated constraints aren't met. +type HedgePolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HedgePolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HedgePolicyMultiError) AllErrors() []error { return m } + +// HedgePolicyValidationError is the validation error returned by +// HedgePolicy.Validate if the designated constraints aren't met. +type HedgePolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e HedgePolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HedgePolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HedgePolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HedgePolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HedgePolicyValidationError) ErrorName() string { return "HedgePolicyValidationError" } + +// Error satisfies the builtin error interface +func (e HedgePolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHedgePolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HedgePolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HedgePolicyValidationError{} + +// Validate checks the field values on RedirectAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RedirectAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RedirectAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RedirectActionMultiError, +// or nil if none found. +func (m *RedirectAction) ValidateAll() error { + return m.validate(true) +} + +func (m *RedirectAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_RedirectAction_HostRedirect_Pattern.MatchString(m.GetHostRedirect()) { + err := RedirectActionValidationError{ + field: "HostRedirect", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for PortRedirect + + if _, ok := RedirectAction_RedirectResponseCode_name[int32(m.GetResponseCode())]; !ok { + err := RedirectActionValidationError{ + field: "ResponseCode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for StripQuery + + switch v := m.SchemeRewriteSpecifier.(type) { + case *RedirectAction_HttpsRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for HttpsRedirect + case *RedirectAction_SchemeRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for SchemeRedirect + default: + _ = v // ensures v is used + } + switch v := m.PathRewriteSpecifier.(type) { + case *RedirectAction_PathRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RedirectAction_PathRedirect_Pattern.MatchString(m.GetPathRedirect()) { + err := 
RedirectActionValidationError{ + field: "PathRedirect", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RedirectAction_PrefixRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RedirectAction_PrefixRewrite_Pattern.MatchString(m.GetPrefixRewrite()) { + err := RedirectActionValidationError{ + field: "PrefixRewrite", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RedirectAction_RegexRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RedirectActionMultiError(errors) + } + + return nil +} + +// RedirectActionMultiError is an error wrapping multiple validation errors +// returned by RedirectAction.ValidateAll() if the designated constraints +// aren't met. +type RedirectActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RedirectActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RedirectActionMultiError) AllErrors() []error { return m } + +// RedirectActionValidationError is the validation error returned by +// RedirectAction.Validate if the designated constraints aren't met. +type RedirectActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RedirectActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RedirectActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RedirectActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RedirectActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RedirectActionValidationError) ErrorName() string { return "RedirectActionValidationError" } + +// Error satisfies the builtin error interface +func (e RedirectActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRedirectAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RedirectActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RedirectActionValidationError{} + +var _RedirectAction_HostRedirect_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RedirectAction_PathRedirect_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RedirectAction_PrefixRewrite_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on DirectResponseAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DirectResponseAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DirectResponseAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DirectResponseActionMultiError, or nil if none found. +func (m *DirectResponseAction) ValidateAll() error { + return m.validate(true) +} + +func (m *DirectResponseAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetStatus(); val < 200 || val >= 600 { + err := DirectResponseActionValidationError{ + field: "Status", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetBody()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBody()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DirectResponseActionMultiError(errors) + } + + return nil +} + +// DirectResponseActionMultiError is an error wrapping multiple validation +// errors returned by DirectResponseAction.ValidateAll() if the designated +// constraints aren't met. +type DirectResponseActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DirectResponseActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m DirectResponseActionMultiError) AllErrors() []error { return m } + +// DirectResponseActionValidationError is the validation error returned by +// DirectResponseAction.Validate if the designated constraints aren't met. +type DirectResponseActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DirectResponseActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DirectResponseActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DirectResponseActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DirectResponseActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DirectResponseActionValidationError) ErrorName() string { + return "DirectResponseActionValidationError" +} + +// Error satisfies the builtin error interface +func (e DirectResponseActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDirectResponseAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DirectResponseActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DirectResponseActionValidationError{} + +// Validate checks the field values on NonForwardingAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *NonForwardingAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NonForwardingAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NonForwardingActionMultiError, or nil if none found. +func (m *NonForwardingAction) ValidateAll() error { + return m.validate(true) +} + +func (m *NonForwardingAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return NonForwardingActionMultiError(errors) + } + + return nil +} + +// NonForwardingActionMultiError is an error wrapping multiple validation +// errors returned by NonForwardingAction.ValidateAll() if the designated +// constraints aren't met. +type NonForwardingActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NonForwardingActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NonForwardingActionMultiError) AllErrors() []error { return m } + +// NonForwardingActionValidationError is the validation error returned by +// NonForwardingAction.Validate if the designated constraints aren't met. +type NonForwardingActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NonForwardingActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e NonForwardingActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NonForwardingActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NonForwardingActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NonForwardingActionValidationError) ErrorName() string { + return "NonForwardingActionValidationError" +} + +// Error satisfies the builtin error interface +func (e NonForwardingActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNonForwardingAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NonForwardingActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NonForwardingActionValidationError{} + +// Validate checks the field values on Decorator with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Decorator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Decorator with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DecoratorMultiError, or nil +// if none found. +func (m *Decorator) ValidateAll() error { + return m.validate(true) +} + +func (m *Decorator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetOperation()) < 1 { + err := DecoratorValidationError{ + field: "Operation", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPropagate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPropagate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DecoratorMultiError(errors) + } + + return nil +} + +// DecoratorMultiError is an error wrapping multiple validation errors returned +// by Decorator.ValidateAll() if the designated constraints aren't met. +type DecoratorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DecoratorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DecoratorMultiError) AllErrors() []error { return m } + +// DecoratorValidationError is the validation error returned by +// Decorator.Validate if the designated constraints aren't met. 
+type DecoratorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DecoratorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DecoratorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DecoratorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DecoratorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DecoratorValidationError) ErrorName() string { return "DecoratorValidationError" } + +// Error satisfies the builtin error interface +func (e DecoratorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDecorator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DecoratorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DecoratorValidationError{} + +// Validate checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TracingMultiError, or nil if none found. +func (m *Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetClientSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRandomSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRandomSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetOverallSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverallSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TracingMultiError(errors) + } + + return nil +} + +// TracingMultiError is an error wrapping multiple validation errors returned +// by Tracing.ValidateAll() if the designated constraints aren't met. +type TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TracingMultiError) AllErrors() []error { return m } + +// TracingValidationError is the validation error returned by Tracing.Validate +// if the designated constraints aren't met. +type TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TracingValidationError) ErrorName() string { return "TracingValidationError" } + +// Error satisfies the builtin error interface +func (e TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TracingValidationError{} + +// Validate checks the field values on VirtualCluster with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *VirtualCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VirtualCluster with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in VirtualClusterMultiError, +// or nil if none found. +func (m *VirtualCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *VirtualCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := VirtualClusterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return VirtualClusterMultiError(errors) + } + + return nil +} + +// VirtualClusterMultiError is an error wrapping multiple validation errors +// returned by VirtualCluster.ValidateAll() if the designated constraints +// aren't met. +type VirtualClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VirtualClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VirtualClusterMultiError) AllErrors() []error { return m } + +// VirtualClusterValidationError is the validation error returned by +// VirtualCluster.Validate if the designated constraints aren't met. +type VirtualClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e VirtualClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VirtualClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VirtualClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VirtualClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VirtualClusterValidationError) ErrorName() string { return "VirtualClusterValidationError" } + +// Error satisfies the builtin error interface +func (e VirtualClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVirtualCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VirtualClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VirtualClusterValidationError{} + +// Validate checks the field values on RateLimit with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RateLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RateLimitMultiError, or nil +// if none found. +func (m *RateLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetStage(); wrapper != nil { + + if wrapper.GetValue() > 10 { + err := RateLimitValidationError{ + field: "Stage", + reason: "value must be less than or equal to 10", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for DisableKey + + if len(m.GetActions()) < 1 { + err := RateLimitValidationError{ + field: "Actions", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err 
:= v.Validate(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RateLimitMultiError(errors) + } + + return nil +} + +// RateLimitMultiError is an error wrapping multiple validation errors returned +// by RateLimit.ValidateAll() if the designated constraints aren't met. +type RateLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitMultiError) AllErrors() []error { return m } + +// RateLimitValidationError is the validation error returned by +// RateLimit.Validate if the designated constraints aren't met. +type RateLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitValidationError) ErrorName() string { return "RateLimitValidationError" } + +// Error satisfies the builtin error interface +func (e RateLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitValidationError{} + +// Validate checks the field values on HeaderMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMatcherMultiError, or +// nil if none found. 
+func (m *HeaderMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HeaderMatcherValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderMatcher_Name_Pattern.MatchString(m.GetName()) { + err := HeaderMatcherValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for InvertMatch + + // no validation rules for TreatMissingHeaderAsEmpty + + switch v := m.HeaderMatchSpecifier.(type) { + case *HeaderMatcher_ExactMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for ExactMatch + case *HeaderMatcher_SafeRegexMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegexMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegexMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HeaderMatcher_RangeMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRangeMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRangeMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HeaderMatcher_PresentMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for PresentMatch + case *HeaderMatcher_PrefixMatch: + if v == nil { + err := 
HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetPrefixMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "PrefixMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_SuffixMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetSuffixMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "SuffixMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_ContainsMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetContainsMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "ContainsMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_StringMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HeaderMatcherMultiError(errors) + } + + return nil +} + +// HeaderMatcherMultiError is an error wrapping multiple validation errors +// returned by HeaderMatcher.ValidateAll() if the designated constraints +// aren't met. +type HeaderMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderMatcherMultiError) AllErrors() []error { return m } + +// HeaderMatcherValidationError is the validation error returned by +// HeaderMatcher.Validate if the designated constraints aren't met. +type HeaderMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HeaderMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderMatcherValidationError) ErrorName() string { return "HeaderMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderMatcherValidationError{} + +var _HeaderMatcher_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on QueryParameterMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QueryParameterMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QueryParameterMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QueryParameterMatcherMultiError, or nil if none found. +func (m *QueryParameterMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *QueryParameterMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := QueryParameterMatcherValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetName()) > 1024 { + err := QueryParameterMatcherValidationError{ + field: "Name", + reason: "value length must be at most 1024 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.QueryParameterMatchSpecifier.(type) { + case *QueryParameterMatcher_StringMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetStringMatch() == nil { + err := QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QueryParameterMatcherValidationError{ + 
field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *QueryParameterMatcher_PresentMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for PresentMatch + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return QueryParameterMatcherMultiError(errors) + } + + return nil +} + +// QueryParameterMatcherMultiError is an error wrapping multiple validation +// errors returned by QueryParameterMatcher.ValidateAll() if the designated +// constraints aren't met. +type QueryParameterMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QueryParameterMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QueryParameterMatcherMultiError) AllErrors() []error { return m } + +// QueryParameterMatcherValidationError is the validation error returned by +// QueryParameterMatcher.Validate if the designated constraints aren't met. +type QueryParameterMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QueryParameterMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QueryParameterMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QueryParameterMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QueryParameterMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QueryParameterMatcherValidationError) ErrorName() string { + return "QueryParameterMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e QueryParameterMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQueryParameterMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QueryParameterMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QueryParameterMatcherValidationError{} + +// Validate checks the field values on InternalRedirectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *InternalRedirectPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on InternalRedirectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// InternalRedirectPolicyMultiError, or nil if none found. 
+func (m *InternalRedirectPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *InternalRedirectPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxInternalRedirects()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInternalRedirects()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRedirectResponseCodes()) > 5 { + err := InternalRedirectPolicyValidationError{ + field: "RedirectResponseCodes", + reason: "value must contain no more than 5 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AllowCrossSchemeRedirect + + _InternalRedirectPolicy_ResponseHeadersToCopy_Unique := make(map[string]struct{}, len(m.GetResponseHeadersToCopy())) + + for idx, item := range m.GetResponseHeadersToCopy() { + _, _ = idx, item + + if _, exists := _InternalRedirectPolicy_ResponseHeadersToCopy_Unique[item]; exists { + err := InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("ResponseHeadersToCopy[%v]", idx), + reason: "repeated value must contain unique items", + } + if !all { + return err + } + errors = append(errors, err) + } else { + _InternalRedirectPolicy_ResponseHeadersToCopy_Unique[item] = struct{}{} + } + + if !_InternalRedirectPolicy_ResponseHeadersToCopy_Pattern.MatchString(item) { + err := InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("ResponseHeadersToCopy[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return InternalRedirectPolicyMultiError(errors) + } + + return nil +} + +// InternalRedirectPolicyMultiError is an error wrapping multiple validation +// errors returned by InternalRedirectPolicy.ValidateAll() if the designated +// constraints aren't met. 
+type InternalRedirectPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m InternalRedirectPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m InternalRedirectPolicyMultiError) AllErrors() []error { return m } + +// InternalRedirectPolicyValidationError is the validation error returned by +// InternalRedirectPolicy.Validate if the designated constraints aren't met. +type InternalRedirectPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e InternalRedirectPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e InternalRedirectPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e InternalRedirectPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e InternalRedirectPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e InternalRedirectPolicyValidationError) ErrorName() string { + return "InternalRedirectPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e InternalRedirectPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInternalRedirectPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = InternalRedirectPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = InternalRedirectPolicyValidationError{} + +var _InternalRedirectPolicy_ResponseHeadersToCopy_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on FilterConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterConfigMultiError, or +// nil if none found. 
+func (m *FilterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + // no validation rules for Disabled + + if len(errors) > 0 { + return FilterConfigMultiError(errors) + } + + return nil +} + +// FilterConfigMultiError is an error wrapping multiple validation errors +// returned by FilterConfig.ValidateAll() if the designated constraints aren't met. +type FilterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterConfigMultiError) AllErrors() []error { return m } + +// FilterConfigValidationError is the validation error returned by +// FilterConfig.Validate if the designated constraints aren't met. +type FilterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterConfigValidationError) ErrorName() string { return "FilterConfigValidationError" } + +// Error satisfies the builtin error interface +func (e FilterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterConfigValidationError{} + +// Validate checks the field values on WeightedCluster_ClusterWeight with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *WeightedCluster_ClusterWeight) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WeightedCluster_ClusterWeight with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the result is a list of violation errors wrapped in +// WeightedCluster_ClusterWeightMultiError, or nil if none found. +func (m *WeightedCluster_ClusterWeight) ValidateAll() error { + return m.validate(true) +} + +func (m *WeightedCluster_ClusterWeight) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if !_WeightedCluster_ClusterWeight_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := WeightedCluster_ClusterWeightValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWeight()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeight()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadataMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := WeightedCluster_ClusterWeightValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: 
fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_WeightedCluster_ClusterWeight_RequestHeadersToRemove_Pattern.MatchString(item) { + err := WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := WeightedCluster_ClusterWeightValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if !_WeightedCluster_ClusterWeight_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } 
+ + switch v := m.HostRewriteSpecifier.(type) { + case *WeightedCluster_ClusterWeight_HostRewriteLiteral: + if v == nil { + err := WeightedCluster_ClusterWeightValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_WeightedCluster_ClusterWeight_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { + err := WeightedCluster_ClusterWeightValidationError{ + field: "HostRewriteLiteral", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return WeightedCluster_ClusterWeightMultiError(errors) + } + + return nil +} + +// WeightedCluster_ClusterWeightMultiError is an error wrapping multiple +// validation errors returned by WeightedCluster_ClusterWeight.ValidateAll() +// if the designated constraints aren't met. +type WeightedCluster_ClusterWeightMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WeightedCluster_ClusterWeightMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WeightedCluster_ClusterWeightMultiError) AllErrors() []error { return m } + +// WeightedCluster_ClusterWeightValidationError is the validation error +// returned by WeightedCluster_ClusterWeight.Validate if the designated +// constraints aren't met. +type WeightedCluster_ClusterWeightValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WeightedCluster_ClusterWeightValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WeightedCluster_ClusterWeightValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WeightedCluster_ClusterWeightValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WeightedCluster_ClusterWeightValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WeightedCluster_ClusterWeightValidationError) ErrorName() string { + return "WeightedCluster_ClusterWeightValidationError" +} + +// Error satisfies the builtin error interface +func (e WeightedCluster_ClusterWeightValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWeightedCluster_ClusterWeight.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WeightedCluster_ClusterWeightValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WeightedCluster_ClusterWeightValidationError{} + +var _WeightedCluster_ClusterWeight_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_HostRewriteLiteral_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteMatch_GrpcRouteMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteMatch_GrpcRouteMatchOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_GrpcRouteMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteMatch_GrpcRouteMatchOptionsMultiError, or nil if none found. +func (m *RouteMatch_GrpcRouteMatchOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_GrpcRouteMatchOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RouteMatch_GrpcRouteMatchOptionsMultiError(errors) + } + + return nil +} + +// RouteMatch_GrpcRouteMatchOptionsMultiError is an error wrapping multiple +// validation errors returned by +// RouteMatch_GrpcRouteMatchOptions.ValidateAll() if the designated +// constraints aren't met. +type RouteMatch_GrpcRouteMatchOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_GrpcRouteMatchOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_GrpcRouteMatchOptionsMultiError) AllErrors() []error { return m } + +// RouteMatch_GrpcRouteMatchOptionsValidationError is the validation error +// returned by RouteMatch_GrpcRouteMatchOptions.Validate if the designated +// constraints aren't met. +type RouteMatch_GrpcRouteMatchOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) ErrorName() string { + return "RouteMatch_GrpcRouteMatchOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_GrpcRouteMatchOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_GrpcRouteMatchOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_GrpcRouteMatchOptionsValidationError{} + +// Validate checks the field values on RouteMatch_TlsContextMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteMatch_TlsContextMatchOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_TlsContextMatchOptions +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteMatch_TlsContextMatchOptionsMultiError, or nil if none found. +func (m *RouteMatch_TlsContextMatchOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_TlsContextMatchOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPresented()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPresented()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteMatch_TlsContextMatchOptionsMultiError(errors) + } + + return nil +} + +// RouteMatch_TlsContextMatchOptionsMultiError is an error wrapping multiple +// validation errors returned by +// RouteMatch_TlsContextMatchOptions.ValidateAll() if the designated +// constraints aren't met. +type RouteMatch_TlsContextMatchOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_TlsContextMatchOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_TlsContextMatchOptionsMultiError) AllErrors() []error { return m } + +// RouteMatch_TlsContextMatchOptionsValidationError is the validation error +// returned by RouteMatch_TlsContextMatchOptions.Validate if the designated +// constraints aren't met. +type RouteMatch_TlsContextMatchOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_TlsContextMatchOptionsValidationError) ErrorName() string { + return "RouteMatch_TlsContextMatchOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_TlsContextMatchOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_TlsContextMatchOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_TlsContextMatchOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_TlsContextMatchOptionsValidationError{} + +// Validate checks the field values on RouteMatch_ConnectMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteMatch_ConnectMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_ConnectMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteMatch_ConnectMatcherMultiError, or nil if none found. 
+func (m *RouteMatch_ConnectMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_ConnectMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RouteMatch_ConnectMatcherMultiError(errors) + } + + return nil +} + +// RouteMatch_ConnectMatcherMultiError is an error wrapping multiple validation +// errors returned by RouteMatch_ConnectMatcher.ValidateAll() if the +// designated constraints aren't met. +type RouteMatch_ConnectMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_ConnectMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_ConnectMatcherMultiError) AllErrors() []error { return m } + +// RouteMatch_ConnectMatcherValidationError is the validation error returned by +// RouteMatch_ConnectMatcher.Validate if the designated constraints aren't met. +type RouteMatch_ConnectMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_ConnectMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_ConnectMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatch_ConnectMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_ConnectMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_ConnectMatcherValidationError) ErrorName() string { + return "RouteMatch_ConnectMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_ConnectMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_ConnectMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_ConnectMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_ConnectMatcherValidationError{} + +// Validate checks the field values on RouteAction_RequestMirrorPolicy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_RequestMirrorPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_RequestMirrorPolicy with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_RequestMirrorPolicyMultiError, or nil if none found. 
+func (m *RouteAction_RequestMirrorPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_RequestMirrorPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Cluster + + if !_RouteAction_RequestMirrorPolicy_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := RouteAction_RequestMirrorPolicyValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRuntimeFraction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFraction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTraceSampled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceSampled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableShadowHostSuffixAppend + + if len(errors) > 0 { + return RouteAction_RequestMirrorPolicyMultiError(errors) + } + + return nil +} + +// RouteAction_RequestMirrorPolicyMultiError is an error wrapping multiple +// validation errors returned by RouteAction_RequestMirrorPolicy.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_RequestMirrorPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_RequestMirrorPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_RequestMirrorPolicyMultiError) AllErrors() []error { return m } + +// RouteAction_RequestMirrorPolicyValidationError is the validation error +// returned by RouteAction_RequestMirrorPolicy.Validate if the designated +// constraints aren't met. +type RouteAction_RequestMirrorPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RouteAction_RequestMirrorPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_RequestMirrorPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_RequestMirrorPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_RequestMirrorPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_RequestMirrorPolicyValidationError) ErrorName() string { + return "RouteAction_RequestMirrorPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_RequestMirrorPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_RequestMirrorPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_RequestMirrorPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_RequestMirrorPolicyValidationError{} + +var _RouteAction_RequestMirrorPolicy_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicyMultiError, or nil if none found. 
+func (m *RouteAction_HashPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Terminal + + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { + case *RouteAction_HashPolicy_Header_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_Cookie_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetCookie()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCookie()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_ConnectionProperties_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetConnectionProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_QueryParameter_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetQueryParameter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryParameter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_FilterState_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicyMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicyMultiError is an error wrapping multiple validation +// errors returned by RouteAction_HashPolicy.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicyMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicyValidationError is the validation error returned by +// RouteAction_HashPolicy.Validate if the designated constraints aren't met. 
+type RouteAction_HashPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicyValidationError) ErrorName() string { + return "RouteAction_HashPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicyValidationError{} + +// Validate checks the field values on RouteAction_UpgradeConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_UpgradeConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_UpgradeConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteAction_UpgradeConfigMultiError, or nil if none found. 
+func (m *RouteAction_UpgradeConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_UpgradeConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetUpgradeType()) < 1 { + err := RouteAction_UpgradeConfigValidationError{ + field: "UpgradeType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_UpgradeConfig_UpgradeType_Pattern.MatchString(m.GetUpgradeType()) { + err := RouteAction_UpgradeConfigValidationError{ + field: "UpgradeType", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_UpgradeConfigMultiError(errors) + } + + return nil +} + +// RouteAction_UpgradeConfigMultiError is an error wrapping multiple validation +// errors returned by RouteAction_UpgradeConfig.ValidateAll() if the +// designated constraints aren't met. +type RouteAction_UpgradeConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_UpgradeConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_UpgradeConfigMultiError) AllErrors() []error { return m } + +// RouteAction_UpgradeConfigValidationError is the validation error returned by +// RouteAction_UpgradeConfig.Validate if the designated constraints aren't met. +type RouteAction_UpgradeConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RouteAction_UpgradeConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_UpgradeConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_UpgradeConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_UpgradeConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_UpgradeConfigValidationError) ErrorName() string { + return "RouteAction_UpgradeConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_UpgradeConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_UpgradeConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_UpgradeConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_UpgradeConfigValidationError{} + +var _RouteAction_UpgradeConfig_UpgradeType_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_MaxStreamDuration with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_MaxStreamDuration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_MaxStreamDuration with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_MaxStreamDurationMultiError, or nil if none found. 
+func (m *RouteAction_MaxStreamDuration) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_MaxStreamDuration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutHeaderMax()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutHeaderMax()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutHeaderOffset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutHeaderOffset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_MaxStreamDurationMultiError(errors) + } + + return nil +} + +// RouteAction_MaxStreamDurationMultiError is an error wrapping multiple +// validation errors returned by RouteAction_MaxStreamDuration.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_MaxStreamDurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RouteAction_MaxStreamDurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_MaxStreamDurationMultiError) AllErrors() []error { return m } + +// RouteAction_MaxStreamDurationValidationError is the validation error +// returned by RouteAction_MaxStreamDuration.Validate if the designated +// constraints aren't met. +type RouteAction_MaxStreamDurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_MaxStreamDurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_MaxStreamDurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_MaxStreamDurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_MaxStreamDurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_MaxStreamDurationValidationError) ErrorName() string { + return "RouteAction_MaxStreamDurationValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_MaxStreamDurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_MaxStreamDuration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_MaxStreamDurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_MaxStreamDurationValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_Header with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_Header) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_Header with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_HeaderMultiError, or nil if none found. 
+func (m *RouteAction_HashPolicy_Header) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_Header) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetHeaderName()) < 1 { + err := RouteAction_HashPolicy_HeaderValidationError{ + field: "HeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_Header_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := RouteAction_HashPolicy_HeaderValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_HeaderMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_HeaderMultiError is an error wrapping multiple +// validation errors returned by RouteAction_HashPolicy_Header.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_HashPolicy_HeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_HeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_HeaderMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_HeaderValidationError is the validation error +// returned by RouteAction_HashPolicy_Header.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_HeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_HeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_HeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_HeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_HeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteAction_HashPolicy_HeaderValidationError) ErrorName() string { + return "RouteAction_HashPolicy_HeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_HeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_Header.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_HeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_HeaderValidationError{} + +var _RouteAction_HashPolicy_Header_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy_CookieAttribute +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_HashPolicy_CookieAttribute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_HashPolicy_CookieAttribute with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// RouteAction_HashPolicy_CookieAttributeMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_CookieAttribute) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_CookieAttribute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetName()) > 16384 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_CookieAttribute_Name_Pattern.MatchString(m.GetName()) { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetValue()) > 16384 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Value", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_CookieAttribute_Value_Pattern.MatchString(m.GetValue()) { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Value", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_CookieAttributeMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_CookieAttributeMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_CookieAttribute.ValidateAll() if the designated +// constraints aren't met. 
+type RouteAction_HashPolicy_CookieAttributeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_CookieAttributeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_CookieAttributeMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_CookieAttributeValidationError is the validation +// error returned by RouteAction_HashPolicy_CookieAttribute.Validate if the +// designated constraints aren't met. +type RouteAction_HashPolicy_CookieAttributeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) ErrorName() string { + return "RouteAction_HashPolicy_CookieAttributeValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_CookieAttribute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_CookieAttributeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_CookieAttributeValidationError{} + +var _RouteAction_HashPolicy_CookieAttribute_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HashPolicy_CookieAttribute_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy_Cookie with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_Cookie) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_Cookie with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_CookieMultiError, or nil if none found. 
+func (m *RouteAction_HashPolicy_Cookie) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_Cookie) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_CookieValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTtl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTtl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Path + + for idx, item := range m.GetAttributes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_CookieMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_CookieMultiError is an error wrapping multiple +// validation errors returned by RouteAction_HashPolicy_Cookie.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_HashPolicy_CookieMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_CookieMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_CookieMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_CookieValidationError is the validation error +// returned by RouteAction_HashPolicy_Cookie.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_CookieValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_CookieValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RouteAction_HashPolicy_CookieValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_CookieValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_CookieValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_CookieValidationError) ErrorName() string { + return "RouteAction_HashPolicy_CookieValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_CookieValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_Cookie.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_CookieValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_CookieValidationError{} + +// Validate checks the field values on +// RouteAction_HashPolicy_ConnectionProperties with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_ConnectionProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_HashPolicy_ConnectionProperties with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RouteAction_HashPolicy_ConnectionPropertiesMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_ConnectionProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SourceIp + + if len(errors) > 0 { + return RouteAction_HashPolicy_ConnectionPropertiesMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_ConnectionPropertiesMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_ConnectionProperties.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_ConnectionPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_ConnectionPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_ConnectionPropertiesMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_ConnectionPropertiesValidationError is the validation +// error returned by RouteAction_HashPolicy_ConnectionProperties.Validate if +// the designated constraints aren't met. +type RouteAction_HashPolicy_ConnectionPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) ErrorName() string { + return "RouteAction_HashPolicy_ConnectionPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_ConnectionProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_ConnectionPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_ConnectionPropertiesValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_QueryParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_HashPolicy_QueryParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_QueryParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_QueryParameterMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_QueryParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_QueryParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_QueryParameterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_QueryParameterMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_QueryParameterMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_QueryParameter.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_QueryParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_QueryParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_QueryParameterMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_QueryParameterValidationError is the validation error +// returned by RouteAction_HashPolicy_QueryParameter.Validate if the +// designated constraints aren't met. 
+type RouteAction_HashPolicy_QueryParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_QueryParameterValidationError) ErrorName() string { + return "RouteAction_HashPolicy_QueryParameterValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_QueryParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_QueryParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_QueryParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_QueryParameterValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_FilterState with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteAction_HashPolicy_FilterState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_FilterState +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_FilterStateMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_FilterState) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_FilterState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := RouteAction_HashPolicy_FilterStateValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_FilterStateMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_FilterStateMultiError is an error wrapping multiple +// validation errors returned by +// RouteAction_HashPolicy_FilterState.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_FilterStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_FilterStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RouteAction_HashPolicy_FilterStateMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_FilterStateValidationError is the validation error +// returned by RouteAction_HashPolicy_FilterState.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_FilterStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_FilterStateValidationError) ErrorName() string { + return "RouteAction_HashPolicy_FilterStateValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_FilterStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_FilterState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_FilterStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_FilterStateValidationError{} + +// Validate checks the field values on RouteAction_UpgradeConfig_ConnectConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_UpgradeConfig_ConnectConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_UpgradeConfig_ConnectConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// RouteAction_UpgradeConfig_ConnectConfigMultiError, or nil if none found. 
+func (m *RouteAction_UpgradeConfig_ConnectConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetProxyProtocolConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyProtocolConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowPost + + if len(errors) > 0 { + return RouteAction_UpgradeConfig_ConnectConfigMultiError(errors) + } + + return nil +} + +// RouteAction_UpgradeConfig_ConnectConfigMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_UpgradeConfig_ConnectConfig.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_UpgradeConfig_ConnectConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_UpgradeConfig_ConnectConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_UpgradeConfig_ConnectConfigMultiError) AllErrors() []error { return m } + +// RouteAction_UpgradeConfig_ConnectConfigValidationError is the validation +// error returned by RouteAction_UpgradeConfig_ConnectConfig.Validate if the +// designated constraints aren't met. +type RouteAction_UpgradeConfig_ConnectConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) ErrorName() string { + return "RouteAction_UpgradeConfig_ConnectConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_UpgradeConfig_ConnectConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_UpgradeConfig_ConnectConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_UpgradeConfig_ConnectConfigValidationError{} + +// Validate checks the field values on RetryPolicy_RetryPriority with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryPriority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryPriority with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryPriorityMultiError, or nil if none found. +func (m *RetryPolicy_RetryPriority) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryPriority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryPriorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryPriorityMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryPriorityMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryPriority.ValidateAll() if the +// designated constraints aren't met. +type RetryPolicy_RetryPriorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RetryPolicy_RetryPriorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryPriorityMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryPriorityValidationError is the validation error returned by +// RetryPolicy_RetryPriority.Validate if the designated constraints aren't met. +type RetryPolicy_RetryPriorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryPriorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryPriorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryPriorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryPriorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryPriorityValidationError) ErrorName() string { + return "RetryPolicy_RetryPriorityValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryPriorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryPriority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryPriorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryPriorityValidationError{} + +// Validate checks the field values on RetryPolicy_RetryHostPredicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryHostPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryHostPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryHostPredicateMultiError, or nil if none found. 
+func (m *RetryPolicy_RetryHostPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryHostPredicateMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryHostPredicateMultiError is an error wrapping multiple +// validation errors returned by RetryPolicy_RetryHostPredicate.ValidateAll() +// if the designated constraints aren't met. +type RetryPolicy_RetryHostPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryHostPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryHostPredicateMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryHostPredicateValidationError is the validation error +// returned by RetryPolicy_RetryHostPredicate.Validate if the designated +// constraints aren't met. +type RetryPolicy_RetryHostPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryHostPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryHostPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryHostPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryHostPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicy_RetryHostPredicateValidationError) ErrorName() string { + return "RetryPolicy_RetryHostPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryHostPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryHostPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryHostPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryHostPredicateValidationError{} + +// Validate checks the field values on RetryPolicy_RetryBackOff with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryBackOff) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryBackOff with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryBackOffMultiError, or nil if none found. +func (m *RetryPolicy_RetryBackOff) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryBackOff) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RetryBackOffValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RetryBackOffValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return RetryPolicy_RetryBackOffMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryBackOffMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryBackOff.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_RetryBackOffMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RetryPolicy_RetryBackOffMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryBackOffMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryBackOffValidationError is the validation error returned by +// RetryPolicy_RetryBackOff.Validate if the designated constraints aren't met. +type RetryPolicy_RetryBackOffValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryBackOffValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryBackOffValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryBackOffValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryBackOffValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryBackOffValidationError) ErrorName() string { + return "RetryPolicy_RetryBackOffValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryBackOffValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryBackOff.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryBackOffValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryBackOffValidationError{} + +// Validate checks the field values on RetryPolicy_ResetHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_ResetHeader) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_ResetHeader with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_ResetHeaderMultiError, or nil if none found. 
+func (m *RetryPolicy_ResetHeader) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_ResetHeader) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RetryPolicy_ResetHeader_Name_Pattern.MatchString(m.GetName()) { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := RetryPolicy_ResetHeaderFormat_name[int32(m.GetFormat())]; !ok { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Format", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RetryPolicy_ResetHeaderMultiError(errors) + } + + return nil +} + +// RetryPolicy_ResetHeaderMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_ResetHeader.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_ResetHeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_ResetHeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_ResetHeaderMultiError) AllErrors() []error { return m } + +// RetryPolicy_ResetHeaderValidationError is the validation error returned by +// RetryPolicy_ResetHeader.Validate if the designated constraints aren't met. +type RetryPolicy_ResetHeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_ResetHeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_ResetHeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_ResetHeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_ResetHeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_ResetHeaderValidationError) ErrorName() string { + return "RetryPolicy_ResetHeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_ResetHeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_ResetHeader.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_ResetHeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_ResetHeaderValidationError{} + +var _RetryPolicy_ResetHeader_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RetryPolicy_RateLimitedRetryBackOff with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RetryPolicy_RateLimitedRetryBackOff) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RateLimitedRetryBackOff +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RateLimitedRetryBackOffMultiError, or nil if none found. +func (m *RetryPolicy_RateLimitedRetryBackOff) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetResetHeaders()) < 1 { + err := RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "ResetHeaders", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return RetryPolicy_RateLimitedRetryBackOffMultiError(errors) + } + + return nil +} + +// RetryPolicy_RateLimitedRetryBackOffMultiError is an error wrapping multiple +// validation errors returned by +// RetryPolicy_RateLimitedRetryBackOff.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_RateLimitedRetryBackOffMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RateLimitedRetryBackOffMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RetryPolicy_RateLimitedRetryBackOffMultiError) AllErrors() []error { return m } + +// RetryPolicy_RateLimitedRetryBackOffValidationError is the validation error +// returned by RetryPolicy_RateLimitedRetryBackOff.Validate if the designated +// constraints aren't met. +type RetryPolicy_RateLimitedRetryBackOffValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) ErrorName() string { + return "RetryPolicy_RateLimitedRetryBackOffValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RateLimitedRetryBackOff.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RateLimitedRetryBackOffValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RateLimitedRetryBackOffValidationError{} + +// Validate checks the field values on RateLimit_Action with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_ActionMultiError, or nil if none found. 
+func (m *RateLimit_Action) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofActionSpecifierPresent := false + switch v := m.ActionSpecifier.(type) { + case *RateLimit_Action_SourceCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSourceCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_DestinationCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDestinationCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_RequestHeaders_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRequestHeaders()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestHeaders()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_RemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_GenericKey_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGenericKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenericKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_HeaderValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_DynamicMetadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if 
!all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDynamicMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_Metadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_Extension: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_MaskedRemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMaskedRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaskedRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_QueryParameterValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetQueryParameterValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryParameterValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofActionSpecifierPresent { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimit_ActionMultiError(errors) + } + + return nil +} + +// RateLimit_ActionMultiError is an error wrapping multiple validation errors +// returned by RateLimit_Action.ValidateAll() if the designated constraints +// aren't met. +type RateLimit_ActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_ActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_ActionMultiError) AllErrors() []error { return m } + +// RateLimit_ActionValidationError is the validation error returned by +// RateLimit_Action.Validate if the designated constraints aren't met. +type RateLimit_ActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_ActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_ActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_ActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_ActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_ActionValidationError) ErrorName() string { return "RateLimit_ActionValidationError" } + +// Error satisfies the builtin error interface +func (e RateLimit_ActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_ActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_ActionValidationError{} + +// Validate checks the field values on RateLimit_Override with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Override) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Override with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_OverrideMultiError, or nil if none found. +func (m *RateLimit_Override) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Override) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOverrideSpecifierPresent := false + switch v := m.OverrideSpecifier.(type) { + case *RateLimit_Override_DynamicMetadata_: + if v == nil { + err := RateLimit_OverrideValidationError{ + field: "OverrideSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverrideSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDynamicMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOverrideSpecifierPresent { + err := RateLimit_OverrideValidationError{ + field: "OverrideSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimit_OverrideMultiError(errors) + } + + return nil +} + +// RateLimit_OverrideMultiError is an error wrapping multiple validation errors +// returned by RateLimit_Override.ValidateAll() if the designated constraints +// aren't met. +type RateLimit_OverrideMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_OverrideMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimit_OverrideMultiError) AllErrors() []error { return m } + +// RateLimit_OverrideValidationError is the validation error returned by +// RateLimit_Override.Validate if the designated constraints aren't met. +type RateLimit_OverrideValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_OverrideValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_OverrideValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_OverrideValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_OverrideValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_OverrideValidationError) ErrorName() string { + return "RateLimit_OverrideValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_OverrideValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Override.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_OverrideValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_OverrideValidationError{} + +// Validate checks the field values on RateLimit_Action_SourceCluster with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_SourceCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_SourceCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_SourceClusterMultiError, or nil if none found. +func (m *RateLimit_Action_SourceCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_SourceCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_SourceClusterMultiError(errors) + } + + return nil +} + +// RateLimit_Action_SourceClusterMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_SourceCluster.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_SourceClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_SourceClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_SourceClusterMultiError) AllErrors() []error { return m } + +// RateLimit_Action_SourceClusterValidationError is the validation error +// returned by RateLimit_Action_SourceCluster.Validate if the designated +// constraints aren't met. +type RateLimit_Action_SourceClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RateLimit_Action_SourceClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_SourceClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_SourceClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_SourceClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_SourceClusterValidationError) ErrorName() string { + return "RateLimit_Action_SourceClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_SourceClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_SourceCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_SourceClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_SourceClusterValidationError{} + +// Validate checks the field values on RateLimit_Action_DestinationCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_DestinationCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_DestinationCluster +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_DestinationClusterMultiError, or nil if none found. +func (m *RateLimit_Action_DestinationCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_DestinationCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_DestinationClusterMultiError(errors) + } + + return nil +} + +// RateLimit_Action_DestinationClusterMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_DestinationCluster.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_DestinationClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_DestinationClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_DestinationClusterMultiError) AllErrors() []error { return m } + +// RateLimit_Action_DestinationClusterValidationError is the validation error +// returned by RateLimit_Action_DestinationCluster.Validate if the designated +// constraints aren't met. +type RateLimit_Action_DestinationClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_DestinationClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RateLimit_Action_DestinationClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_DestinationClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_DestinationClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_DestinationClusterValidationError) ErrorName() string { + return "RateLimit_Action_DestinationClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_DestinationClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_DestinationCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_DestinationClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_DestinationClusterValidationError{} + +// Validate checks the field values on RateLimit_Action_RequestHeaders with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_RequestHeaders) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_RequestHeaders with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_RequestHeadersMultiError, or nil if none found. +func (m *RateLimit_Action_RequestHeaders) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_RequestHeaders) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetHeaderName()) < 1 { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "HeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RateLimit_Action_RequestHeaders_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for SkipIfAbsent + + if len(errors) > 0 { + return RateLimit_Action_RequestHeadersMultiError(errors) + } + + return nil +} + +// RateLimit_Action_RequestHeadersMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_RequestHeaders.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_RequestHeadersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_RequestHeadersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_RequestHeadersMultiError) AllErrors() []error { return m } + +// RateLimit_Action_RequestHeadersValidationError is the validation error +// returned by RateLimit_Action_RequestHeaders.Validate if the designated +// constraints aren't met. +type RateLimit_Action_RequestHeadersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_RequestHeadersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_RequestHeadersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_RequestHeadersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_RequestHeadersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_RequestHeadersValidationError) ErrorName() string { + return "RateLimit_Action_RequestHeadersValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_RequestHeadersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_RequestHeaders.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_RequestHeadersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_RequestHeadersValidationError{} + +var _RateLimit_Action_RequestHeaders_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RateLimit_Action_RemoteAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_RemoteAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_RemoteAddress with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_RemoteAddressMultiError, or nil if none found. +func (m *RateLimit_Action_RemoteAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_RemoteAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_RemoteAddressMultiError(errors) + } + + return nil +} + +// RateLimit_Action_RemoteAddressMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_RemoteAddress.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_RemoteAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_RemoteAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_RemoteAddressMultiError) AllErrors() []error { return m } + +// RateLimit_Action_RemoteAddressValidationError is the validation error +// returned by RateLimit_Action_RemoteAddress.Validate if the designated +// constraints aren't met. +type RateLimit_Action_RemoteAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_RemoteAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_RemoteAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_RemoteAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_RemoteAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_RemoteAddressValidationError) ErrorName() string { + return "RateLimit_Action_RemoteAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_RemoteAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_RemoteAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_RemoteAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_RemoteAddressValidationError{} + +// Validate checks the field values on RateLimit_Action_MaskedRemoteAddress +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RateLimit_Action_MaskedRemoteAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_MaskedRemoteAddress +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_MaskedRemoteAddressMultiError, or nil if none found. 
+func (m *RateLimit_Action_MaskedRemoteAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_MaskedRemoteAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetV4PrefixMaskLen(); wrapper != nil { + + if wrapper.GetValue() > 32 { + err := RateLimit_Action_MaskedRemoteAddressValidationError{ + field: "V4PrefixMaskLen", + reason: "value must be less than or equal to 32", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetV6PrefixMaskLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := RateLimit_Action_MaskedRemoteAddressValidationError{ + field: "V6PrefixMaskLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return RateLimit_Action_MaskedRemoteAddressMultiError(errors) + } + + return nil +} + +// RateLimit_Action_MaskedRemoteAddressMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_MaskedRemoteAddress.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_MaskedRemoteAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_MaskedRemoteAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_MaskedRemoteAddressMultiError) AllErrors() []error { return m } + +// RateLimit_Action_MaskedRemoteAddressValidationError is the validation error +// returned by RateLimit_Action_MaskedRemoteAddress.Validate if the designated +// constraints aren't met. +type RateLimit_Action_MaskedRemoteAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) ErrorName() string { + return "RateLimit_Action_MaskedRemoteAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_MaskedRemoteAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_MaskedRemoteAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_MaskedRemoteAddressValidationError{} + +// Validate checks the field values on RateLimit_Action_GenericKey with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_GenericKey) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_GenericKey with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_Action_GenericKeyMultiError, or nil if none found. +func (m *RateLimit_Action_GenericKey) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_GenericKey) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_GenericKeyValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DescriptorKey + + if len(errors) > 0 { + return RateLimit_Action_GenericKeyMultiError(errors) + } + + return nil +} + +// RateLimit_Action_GenericKeyMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_GenericKey.ValidateAll() if +// the designated constraints aren't met. +type RateLimit_Action_GenericKeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_GenericKeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_GenericKeyMultiError) AllErrors() []error { return m } + +// RateLimit_Action_GenericKeyValidationError is the validation error returned +// by RateLimit_Action_GenericKey.Validate if the designated constraints +// aren't met. +type RateLimit_Action_GenericKeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_GenericKeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_GenericKeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_GenericKeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_GenericKeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_GenericKeyValidationError) ErrorName() string { + return "RateLimit_Action_GenericKeyValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_GenericKeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_GenericKey.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_GenericKeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_GenericKeyValidationError{} + +// Validate checks the field values on RateLimit_Action_HeaderValueMatch with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_HeaderValueMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_HeaderValueMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_HeaderValueMatchMultiError, or nil if none found. +func (m *RateLimit_Action_HeaderValueMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_HeaderValueMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DescriptorKey + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_HeaderValueMatchValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExpectMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpectMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetHeaders()) < 1 { + err := RateLimit_Action_HeaderValueMatchValidationError{ + field: "Headers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RateLimit_Action_HeaderValueMatchMultiError(errors) + } + + return nil +} + +// RateLimit_Action_HeaderValueMatchMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_HeaderValueMatch.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_HeaderValueMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_HeaderValueMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_HeaderValueMatchMultiError) AllErrors() []error { return m } + +// RateLimit_Action_HeaderValueMatchValidationError is the validation error +// returned by RateLimit_Action_HeaderValueMatch.Validate if the designated +// constraints aren't met. +type RateLimit_Action_HeaderValueMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_HeaderValueMatchValidationError) ErrorName() string { + return "RateLimit_Action_HeaderValueMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_HeaderValueMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_HeaderValueMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_HeaderValueMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_HeaderValueMatchValidationError{} + +// Validate checks the field values on RateLimit_Action_DynamicMetaData with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_DynamicMetaData) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_DynamicMetaData with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_DynamicMetaDataMultiError, or nil if none found. 
+func (m *RateLimit_Action_DynamicMetaData) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_DynamicMetaData) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_DynamicMetaDataValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMetadataKey() == nil { + err := RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return RateLimit_Action_DynamicMetaDataMultiError(errors) + } + + return nil +} + +// RateLimit_Action_DynamicMetaDataMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_DynamicMetaData.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_DynamicMetaDataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_DynamicMetaDataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_DynamicMetaDataMultiError) AllErrors() []error { return m } + +// RateLimit_Action_DynamicMetaDataValidationError is the validation error +// returned by RateLimit_Action_DynamicMetaData.Validate if the designated +// constraints aren't met. +type RateLimit_Action_DynamicMetaDataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_Action_DynamicMetaDataValidationError) ErrorName() string { + return "RateLimit_Action_DynamicMetaDataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_DynamicMetaDataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_DynamicMetaData.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_DynamicMetaDataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_DynamicMetaDataValidationError{} + +// Validate checks the field values on RateLimit_Action_MetaData with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_MetaData) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_MetaData with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_Action_MetaDataMultiError, or nil if none found. +func (m *RateLimit_Action_MetaData) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_MetaData) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_MetaDataValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMetadataKey() == nil { + err := RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if _, ok := RateLimit_Action_MetaData_Source_name[int32(m.GetSource())]; !ok { + err := RateLimit_Action_MetaDataValidationError{ + field: "Source", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for SkipIfAbsent + + if len(errors) > 0 { + return RateLimit_Action_MetaDataMultiError(errors) + } + + return nil +} + +// RateLimit_Action_MetaDataMultiError is an error wrapping multiple validation +// errors returned by RateLimit_Action_MetaData.ValidateAll() if the +// designated constraints aren't met. 
+type RateLimit_Action_MetaDataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_MetaDataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_MetaDataMultiError) AllErrors() []error { return m } + +// RateLimit_Action_MetaDataValidationError is the validation error returned by +// RateLimit_Action_MetaData.Validate if the designated constraints aren't met. +type RateLimit_Action_MetaDataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_MetaDataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_MetaDataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_MetaDataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_MetaDataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_MetaDataValidationError) ErrorName() string { + return "RateLimit_Action_MetaDataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_MetaDataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_MetaData.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_MetaDataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_MetaDataValidationError{} + +// Validate checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_QueryParameterValueMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RateLimit_Action_QueryParameterValueMatchMultiError, or nil if none found. 
+func (m *RateLimit_Action_QueryParameterValueMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DescriptorKey + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExpectMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpectMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetQueryParameters()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "QueryParameters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetQueryParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RateLimit_Action_QueryParameterValueMatchMultiError(errors) + } + + return nil +} + +// RateLimit_Action_QueryParameterValueMatchMultiError is an error wrapping +// multiple validation errors returned by +// RateLimit_Action_QueryParameterValueMatch.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_QueryParameterValueMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimit_Action_QueryParameterValueMatchMultiError) AllErrors() []error { return m } + +// RateLimit_Action_QueryParameterValueMatchValidationError is the validation +// error returned by RateLimit_Action_QueryParameterValueMatch.Validate if the +// designated constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) ErrorName() string { + return "RateLimit_Action_QueryParameterValueMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_QueryParameterValueMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_QueryParameterValueMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_QueryParameterValueMatchValidationError{} + +// Validate checks the field values on RateLimit_Override_DynamicMetadata with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Override_DynamicMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Override_DynamicMetadata +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Override_DynamicMetadataMultiError, or nil if none found. 
+func (m *RateLimit_Override_DynamicMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Override_DynamicMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMetadataKey() == nil { + err := RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RateLimit_Override_DynamicMetadataMultiError(errors) + } + + return nil +} + +// RateLimit_Override_DynamicMetadataMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Override_DynamicMetadata.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Override_DynamicMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Override_DynamicMetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Override_DynamicMetadataMultiError) AllErrors() []error { return m } + +// RateLimit_Override_DynamicMetadataValidationError is the validation error +// returned by RateLimit_Override_DynamicMetadata.Validate if the designated +// constraints aren't met. +type RateLimit_Override_DynamicMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Override_DynamicMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Override_DynamicMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Override_DynamicMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Override_DynamicMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_Override_DynamicMetadataValidationError) ErrorName() string { + return "RateLimit_Override_DynamicMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Override_DynamicMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Override_DynamicMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Override_DynamicMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Override_DynamicMetadataValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go new file mode 100644 index 000000000..79709bb97 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go @@ -0,0 +1,8302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *VirtualHost) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualHost) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualHost) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.IncludeIsTimeoutRetryHeader { + i-- + if m.IncludeIsTimeoutRetryHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.IncludeAttemptCountInResponse { + i-- + if m.IncludeAttemptCountInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeRequestAttemptCount { + i-- + if m.IncludeRequestAttemptCount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.VirtualClusters) > 0 { + for iNdEx := len(m.VirtualClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.VirtualClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RequireTls != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequireTls)) + i-- + dAtA[i] = 0x20 + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Domains) > 0 { + for iNdEx := len(m.Domains) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Domains[iNdEx]) + copy(dAtA[i:], m.Domains[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Domains[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != nil { + size, err := (*anypb.Any)(m.Action).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + 
copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if msg, ok := m.Action.(*Route_NonForwardingAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_FilterAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x72 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) 
+ copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.Action.(*Route_DirectResponse); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Decorator != nil { + size, err := m.Decorator.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.Action.(*Route_Redirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_Route); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Match != nil { + size, err := m.Match.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Route_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Route_Redirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Redirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Redirect != nil { + size, err := m.Redirect.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Route_DirectResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_DirectResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectResponse != nil { + size, err := m.DirectResponse.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Route_FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Route_FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterAction != nil { + size, err := m.FilterAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Route_NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NonForwardingAction != nil { + size, err := m.NonForwardingAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *WeightedCluster_ClusterWeight) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster_ClusterWeight) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HostRewriteSpecifier.(*WeightedCluster_ClusterWeight_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 
1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Weight != nil { + size, err := (*wrapperspb.UInt32Value)(m.Weight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x5a + return len(dAtA) - i, nil +} +func (m *WeightedCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.RandomValueSpecifier.(*WeightedCluster_HeaderName); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TotalWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.TotalWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RuntimeKeyPrefix) > 0 { + i -= len(m.RuntimeKeyPrefix) + copy(dAtA[i:], m.RuntimeKeyPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKeyPrefix))) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Clusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_HeaderName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_HeaderName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *ClusterSpecifierPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m 
*RouteMatch_TlsContextMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Validated != nil { + size, err := (*wrapperspb.BoolValue)(m.Validated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Presented != nil { + size, err := (*wrapperspb.BoolValue)(m.Presented).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathMatchPolicy); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathSeparatedPrefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.DynamicMetadata) > 0 { + for iNdEx := len(m.DynamicMetadata) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DynamicMetadata[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DynamicMetadata[iNdEx]) + if err != 
nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if msg, ok := m.PathSpecifier.(*RouteMatch_ConnectMatcher_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsContext != nil { + size, err := m.TlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.PathSpecifier.(*RouteMatch_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Grpc != nil { + size, err := m.Grpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.CaseSensitive != nil { + size, err := (*wrapperspb.BoolValue)(m.CaseSensitive).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteMatch_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + 
dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteMatch_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + if vtmsg, ok := interface{}(m.SafeRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_ConnectMatcher_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectMatcher != nil { + size, err := m.ConnectMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathSeparatedPrefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathSeparatedPrefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathSeparatedPrefix) + copy(dAtA[i:], m.PathSeparatedPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathSeparatedPrefix))) + i-- + dAtA[i] = 0x72 + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathMatchPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathMatchPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathMatchPolicy != nil { + if vtmsg, ok := interface{}(m.PathMatchPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathMatchPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *CorsPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CorsPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } 
+ i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ForwardNotMatchingPreflights != nil { + size, err := (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.AllowPrivateNetworkAccess != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.AllowOriginStringMatch) > 0 { + for iNdEx := len(m.AllowOriginStringMatch) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AllowOriginStringMatch[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AllowOriginStringMatch[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + } + if m.ShadowEnabled != nil { + if vtmsg, ok := interface{}(m.ShadowEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + if msg, ok := m.EnabledSpecifier.(*CorsPolicy_FilterEnabled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AllowCredentials != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowCredentials).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.MaxAge) > 0 { + i -= len(m.MaxAge) + copy(dAtA[i:], m.MaxAge) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxAge))) + i-- + dAtA[i] = 0x2a + } + if len(m.ExposeHeaders) > 0 { + i -= len(m.ExposeHeaders) + copy(dAtA[i:], m.ExposeHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExposeHeaders))) + i-- + dAtA[i] = 0x22 + } + if len(m.AllowHeaders) > 0 { + i -= len(m.AllowHeaders) + copy(dAtA[i:], m.AllowHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowHeaders))) + i-- + dAtA[i] = 0x1a + } + if len(m.AllowMethods) > 0 { + i -= len(m.AllowMethods) + copy(dAtA[i:], m.AllowMethods) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowMethods))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *CorsPolicy_FilterEnabled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy_FilterEnabled) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterEnabled != nil { + if vtmsg, ok := interface{}(m.FilterEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, 
error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_RequestMirrorPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableShadowHostSuffixAppend { + i-- + if m.DisableShadowHostSuffixAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x2a + } + if m.TraceSampled != nil { + size, err := (*wrapperspb.BoolValue)(m.TraceSampled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := 
vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SourceIp { + i-- + if m.SourceIp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_FilterState_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.PolicySpecifier.(*RouteAction_HashPolicy_QueryParameter_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Terminal { + i-- + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_ConnectionProperties_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Cookie_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Header_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_Cookie_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cookie != nil { + size, err := m.Cookie.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameter != nil { + size, err := m.QueryParameter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowPost { + i-- + if m.AllowPost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ProxyProtocolConfig != nil { + if vtmsg, ok := interface{}(m.ProxyProtocolConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProxyProtocolConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectConfig != nil { + size, err := m.ConnectConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_MaxStreamDuration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := 
m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_MaxStreamDuration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_MaxStreamDuration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcTimeoutHeaderOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.GrpcTimeoutHeaderMax != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PathRewritePolicy != nil { + if vtmsg, ok := interface{}(m.PathRewritePolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathRewritePolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.EarlyDataPolicy != nil { + if vtmsg, ok := interface{}(m.EarlyDataPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyDataPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_InlineClusterSpecifierPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AppendXForwardedHost { + i-- + if 
m.AppendXForwardedHost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterSpecifierPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.MaxStreamDuration != nil { + size, err := m.MaxStreamDuration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewritePathRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InternalRedirectPolicy != nil { + size, err := m.InternalRedirectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.GrpcTimeoutOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.InternalRedirectAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InternalRedirectAction)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.MaxGrpcTimeout != nil { + size, err := (*durationpb.Duration)(m.MaxGrpcTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.ClusterNotFoundResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClusterNotFoundResponseCode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.HashPolicy) > 0 { + for iNdEx := len(m.HashPolicy) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HashPolicy[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeVhRateLimits != nil { + size, err := (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x58 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_AutoHostRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.PrefixRewrite) > 0 { + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_WeightedClusters); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteAction_WeightedClusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_WeightedClusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.WeightedClusters != nil { + size, err := m.WeightedClusters.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *RouteAction_AutoHostRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_AutoHostRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AutoHostRewrite != nil { + size, err := (*wrapperspb.BoolValue)(m.AutoHostRewrite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewriteHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteHeader) + copy(dAtA[i:], m.HostRewriteHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteHeader))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewritePathRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewritePathRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HostRewritePathRegex != nil { + if vtmsg, ok := interface{}(m.HostRewritePathRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HostRewritePathRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterSpecifierPlugin) + copy(dAtA[i:], m.ClusterSpecifierPlugin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterSpecifierPlugin))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + return len(dAtA) - i, nil +} +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InlineClusterSpecifierPlugin != nil { + size, err := m.InlineClusterSpecifierPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_ResetHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_ResetHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_ResetHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ResetHeaders) > 0 { + for iNdEx := len(m.ResetHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResetHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PerTryIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if len(m.RetryOptionsPredicates) > 0 { + for iNdEx := len(m.RetryOptionsPredicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetryOptionsPredicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetryOptionsPredicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.RateLimitedRetryBackOff != nil { + size, err := m.RateLimitedRetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if len(m.RetriableRequestHeaders) > 0 { + for iNdEx := len(m.RetriableRequestHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableRequestHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RetriableHeaders) > 0 { + for iNdEx := len(m.RetriableHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RetriableStatusCodes) > 0 { + var pksize2 int + for _, num := range m.RetriableStatusCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RetriableStatusCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.PerTryTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HedgePolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HedgePolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HedgePolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HedgeOnPerTryTimeout { + i-- + if m.HedgeOnPerTryTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AdditionalRequestChance != nil { + if vtmsg, ok := interface{}(m.AdditionalRequestChance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalRequestChance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.InitialRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_RegexRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PortRedirect != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortRedirect)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_SchemeRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.StripQuery { + i-- + if m.StripQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PrefixRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_HttpsRedirect); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseCode)) + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PathRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.HostRedirect) > 0 { + i -= len(m.HostRedirect) + copy(dAtA[i:], m.HostRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRedirect))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction_PathRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PathRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathRedirect) + copy(dAtA[i:], m.PathRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathRedirect))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RedirectAction_HttpsRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_HttpsRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.HttpsRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *RedirectAction_PrefixRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PrefixRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *RedirectAction_SchemeRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_SchemeRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeRedirect) + copy(dAtA[i:], m.SchemeRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeRedirect))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *RedirectAction_RegexRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_RegexRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *DirectResponseAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DirectResponseAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DirectResponseAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NonForwardingAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Decorator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Decorator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Decorator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Propagate != nil { + size, err := (*wrapperspb.BoolValue)(m.Propagate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Operation) > 0 { + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA 
[]byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= 
len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { 
+ if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.V6PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.V4PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_GenericKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_GenericKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Source != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x20 + } + if len(m.DefaultValue) 
> 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_QueryParameterValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_MaskedRemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Extension); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DynamicMetadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_HeaderValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_GenericKey_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RequestHeaders_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DestinationCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_SourceCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceCluster != nil { + size, err := m.SourceCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DestinationCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationCluster != nil { + size, err := m.DestinationCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RequestHeaders_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RequestHeaders_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeaders != nil { + size, err := 
m.RequestHeaders.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteAddress != nil { + size, err := m.RemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_GenericKey_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericKey != nil { + size, err := m.GenericKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueMatch != nil { + size, err := m.HeaderValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaskedRemoteAddress != nil { + size, err := m.MaskedRemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameterValueMatch != nil { + size, err := m.QueryParameterValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Override_DynamicMetadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override) MarshalVTStrict() (dAtA []byte, err error) { + if m == 
nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverrideSpecifier.(*RateLimit_Override_DynamicMetadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != nil { + size, err := m.Limit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DisableKey) > 0 { + i -= len(m.DisableKey) + copy(dAtA[i:], m.DisableKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisableKey))) + i-- + dAtA[i] = 0x12 + } + if m.Stage != nil { + size, err := (*wrapperspb.UInt32Value)(m.Stage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
+func (m *HeaderMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TreatMissingHeaderAsEmpty { + i-- + if m.TreatMissingHeaderAsEmpty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ContainsMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SafeRegexMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SuffixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PrefixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InvertMatch { + i-- + if m.InvertMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_RangeMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ExactMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher_ExactMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ExactMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ExactMatch) + copy(dAtA[i:], m.ExactMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExactMatch))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_RangeMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_RangeMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RangeMatch != nil { + if vtmsg, ok := interface{}(m.RangeMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RangeMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + 
return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PrefixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PrefixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixMatch) + copy(dAtA[i:], m.PrefixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixMatch))) + i-- + dAtA[i] = 0x4a + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SuffixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SuffixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SuffixMatch) + copy(dAtA[i:], m.SuffixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SuffixMatch))) + i-- + dAtA[i] = 0x52 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SafeRegexMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SafeRegexMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegexMatch != nil { + if vtmsg, ok := interface{}(m.SafeRegexMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegexMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_ContainsMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ContainsMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ContainsMatch) + copy(dAtA[i:], m.ContainsMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContainsMatch))) + i-- + dAtA[i] = 0x62 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + 
dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameterMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *InternalRedirectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRedirectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *InternalRedirectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResponseHeadersToCopy) > 0 
{ + for iNdEx := len(m.ResponseHeadersToCopy) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToCopy[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToCopy[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToCopy[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.AllowCrossSchemeRedirect { + i-- + if m.AllowCrossSchemeRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Predicates) > 0 { + for iNdEx := len(m.Predicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Predicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Predicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RedirectResponseCodes) > 0 { + var pksize2 int + for _, num := range m.RedirectResponseCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RedirectResponseCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + size, err := (*anypb.Any)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualHost) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Domains) > 0 { + for _, s := range m.Domains { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequireTls != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequireTls)) + } + if len(m.VirtualClusters) > 0 { + for _, 
e := range m.VirtualClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeRequestAttemptCount { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IncludeAttemptCountInResponse { + n += 3 + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeIsTimeoutRetryHeader { + n += 3 + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = (*anypb.Any)(m.Action).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != 
nil { + l = m.Match.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Action.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Decorator != nil { + l = m.Decorator.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Route_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_Redirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Redirect != nil { + l = m.Redirect.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_DirectResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectResponse != nil { + l = m.DirectResponse.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterAction != nil { + l = m.FilterAction.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Route_NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NonForwardingAction != nil { + l = m.NonForwardingAction.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *WeightedCluster_ClusterWeight) SizeVT() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != nil { + l = (*wrapperspb.UInt32Value)(m.Weight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *WeightedCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.RuntimeKeyPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalWeight != nil { + l = (*wrapperspb.UInt32Value)(m.TotalWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RandomValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_HeaderName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_GrpcRouteMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) 
+ return n +} + +func (m *RouteMatch_TlsContextMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Presented != nil { + l = (*wrapperspb.BoolValue)(m.Presented).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Validated != nil { + l = (*wrapperspb.BoolValue)(m.Validated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_ConnectMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PathSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.CaseSensitive != nil { + l = (*wrapperspb.BoolValue)(m.CaseSensitive).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Grpc != nil { + l = m.Grpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsContext != nil { + l = m.TlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicMetadata) > 0 { + for _, e := range m.DynamicMetadata { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_SafeRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + if size, ok := interface{}(m.SafeRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegex) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_ConnectMatcher_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectMatcher != nil { + l = m.ConnectMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_PathSeparatedPrefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathSeparatedPrefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_PathMatchPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathMatchPolicy != nil { + if size, ok := interface{}(m.PathMatchPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PathMatchPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n 
+} +func (m *CorsPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AllowMethods) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AllowHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ExposeHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxAge) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCredentials != nil { + l = (*wrapperspb.BoolValue)(m.AllowCredentials).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.EnabledSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ShadowEnabled != nil { + if size, ok := interface{}(m.ShadowEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AllowOriginStringMatch) > 0 { + for _, e := range m.AllowOriginStringMatch { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AllowPrivateNetworkAccess != nil { + l = (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardNotMatchingPreflights != nil { + l = (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CorsPolicy_FilterEnabled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterEnabled != nil { + if size, ok := interface{}(m.FilterEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_RequestMirrorPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceSampled != nil { + l = (*wrapperspb.BoolValue)(m.TraceSampled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableShadowHostSuffixAppend { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_CookieAttribute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + 
n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Terminal { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_Cookie_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cookie != nil { + l = m.Cookie.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_QueryParameter_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameter != nil { + l = m.QueryParameter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProxyProtocolConfig != nil { + if size, ok := interface{}(m.ProxyProtocolConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProxyProtocolConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowPost { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectConfig != nil { + l = m.ConnectConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_MaxStreamDuration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderMax != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ClusterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.PrefixRewrite) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeVhRateLimits != nil { + l = (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HashPolicy) > 0 { + for _, e := range m.HashPolicy { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterNotFoundResponseCode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ClusterNotFoundResponseCode)) + } + if m.MaxGrpcTimeout != nil { + l = (*durationpb.Duration)(m.MaxGrpcTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.InternalRedirectAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.InternalRedirectAction)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutOffset).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if 
m.MaxInternalRedirects != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalRedirectPolicy != nil { + l = m.InternalRedirectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = m.MaxStreamDuration.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedHost { + n += 3 + } + if m.EarlyDataPolicy != nil { + if size, ok := interface{}(m.EarlyDataPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EarlyDataPolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathRewritePolicy != nil { + if size, ok := interface{}(m.PathRewritePolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PathRewritePolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_ClusterHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterHeader) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_WeightedClusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WeightedClusters != nil { + l = m.WeightedClusters.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_AutoHostRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoHostRewrite != nil { + l = (*wrapperspb.BoolValue)(m.AutoHostRewrite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteHeader) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_HostRewritePathRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostRewritePathRegex != nil { + if size, ok := interface{}(m.HostRewritePathRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HostRewritePathRegex) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RouteAction_ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterSpecifierPlugin) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_InlineClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InlineClusterSpecifierPlugin != nil { + l = 
m.InlineClusterSpecifierPlugin.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_ResetHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResetHeaders) > 0 { + for _, e := range m.ResetHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerTryTimeout != nil { + l = (*durationpb.Duration)(m.PerTryTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + if len(m.RetriableStatusCodes) > 0 { + l = 0 + for _, e := range m.RetriableStatusCodes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 
+ protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableHeaders) > 0 { + for _, e := range m.RetriableHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RetriableRequestHeaders) > 0 { + for _, e := range m.RetriableRequestHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RateLimitedRetryBackOff != nil { + l = m.RateLimitedRetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryOptionsPredicates) > 0 { + for _, e := range m.RetryOptionsPredicates { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PerTryIdleTimeout != nil { + l = (*durationpb.Duration)(m.PerTryIdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HedgePolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitialRequests != nil { + l = (*wrapperspb.UInt32Value)(m.InitialRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdditionalRequestChance != nil { + if size, ok := interface{}(m.AdditionalRequestChance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdditionalRequestChance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgeOnPerTryTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRedirect) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PathRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ResponseCode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseCode)) + } + if vtmsg, ok := m.SchemeRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.StripQuery { + n += 2 + } + if m.PortRedirect != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortRedirect)) + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction_PathRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_HttpsRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *RedirectAction_PrefixRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixRewrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_SchemeRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_RegexRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DirectResponseAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Decorator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Operation) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Propagate != nil { + l = (*wrapperspb.BoolValue)(m.Propagate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VirtualCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DestinationCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RequestHeaders) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MaskedRemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.V4PrefixMaskLen != nil { + l = (*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.V6PrefixMaskLen != nil { + l = (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).SizeVT() + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_GenericKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_HeaderValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DynamicMetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Source != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Source)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_QueryParameterValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ActionSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceCluster != nil { + l = m.SourceCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DestinationCluster_) SizeVT() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationCluster != nil { + l = m.DestinationCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RequestHeaders_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeaders != nil { + l = m.RequestHeaders.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteAddress != nil { + l = m.RemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_GenericKey_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericKey != nil { + l = m.GenericKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_HeaderValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueMatch != nil { + l = m.HeaderValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_MaskedRemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaskedRemoteAddress != nil { + l = m.MaskedRemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_QueryParameterValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameterValueMatch != nil { + l = m.QueryParameterValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Override_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Override) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OverrideSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Override_DynamicMetadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stage != nil { + l = (*wrapperspb.UInt32Value)(m.Stage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DisableKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Limit != nil { + l = m.Limit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HeaderMatchSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InvertMatch { + n += 2 + } + if m.TreatMissingHeaderAsEmpty { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher_ExactMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ExactMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_RangeMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RangeMatch != nil { + if size, ok := interface{}(m.RangeMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RangeMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HeaderMatcher_PrefixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SuffixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SuffixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SafeRegexMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegexMatch != nil { + if size, ok := interface{}(m.SafeRegexMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegexMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_ContainsMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainsMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + if size, ok := interface{}(m.StringMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StringMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *QueryParameterMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.QueryParameterMatchSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *QueryParameterMatcher_StringMatch) SizeVT() (n int) 
{ + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + if size, ok := interface{}(m.StringMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StringMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *QueryParameterMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *InternalRedirectPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxInternalRedirects != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RedirectResponseCodes) > 0 { + l = 0 + for _, e := range m.RedirectResponseCodes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.Predicates) > 0 { + for _, e := range m.Predicates { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AllowCrossSchemeRedirect { + n += 2 + } + if len(m.ResponseHeadersToCopy) > 0 { + for _, s := range m.ResponseHeadersToCopy { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FilterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = (*anypb.Any)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + if m.Disabled { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go new file mode 100644 index 000000000..4f536bb8d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go @@ -0,0 +1,474 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.IgnorePathParametersInPathMatching { + i-- + if m.IgnorePathParametersInPathMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.IgnorePortInHostMatching { + i-- + if m.IgnorePortInHostMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ClusterSpecifierPlugins) > 0 { + for iNdEx := len(m.ClusterSpecifierPlugins) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterSpecifierPlugins[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + } + if m.MaxDirectResponseBodySizeBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MostSpecificHeaderMutationsWins { + i-- + if m.MostSpecificHeaderMutationsWins { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.Vhds != nil { + size, err := m.Vhds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if 
len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.ValidateClusters != nil { + size, err := (*wrapperspb.BoolValue)(m.ValidateClusters).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.InternalOnlyHeaders) > 0 { + for iNdEx := len(m.InternalOnlyHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.InternalOnlyHeaders[iNdEx]) + copy(dAtA[i:], m.InternalOnlyHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InternalOnlyHeaders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VirtualHosts) > 0 { + for iNdEx := len(m.VirtualHosts) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.VirtualHosts[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Vhds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vhds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Vhds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VirtualHosts) > 0 { + for _, e := range m.VirtualHosts { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InternalOnlyHeaders) > 0 { + for _, s := range m.InternalOnlyHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ValidateClusters != nil { + l = (*wrapperspb.BoolValue)(m.ValidateClusters).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Vhds != nil { + l = m.Vhds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MostSpecificHeaderMutationsWins { + n += 2 + } + if m.MaxDirectResponseBodySizeBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterSpecifierPlugins) > 0 { + for _, e := range m.ClusterSpecifierPlugins { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnorePortInHostMatching { + n += 2 + } + if m.IgnorePathParametersInPathMatching { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l 
+ protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Vhds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + if size, ok := interface{}(m.ConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go new file mode 100644 index 000000000..1c02988b6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go @@ -0,0 +1,464 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. +// The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically +// via RDS (:ref:`route_configuration_name`) +// or specified inline (:ref:`route_configuration`). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: "," +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the “route-config1“ +// RouteConfiguration being assigned to the HTTP request/stream. +// +// [#next-free-field: 6] +type ScopedRouteConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the RouteConfiguration should be loaded on demand. 
+ OnDemand bool `protobuf:"varint,4,opt,name=on_demand,json=onDemand,proto3" json:"on_demand,omitempty"` + // The name assigned to the routing scope. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated + // with this scope. + RouteConfigurationName string `protobuf:"bytes,2,opt,name=route_configuration_name,json=routeConfigurationName,proto3" json:"route_configuration_name,omitempty"` + // The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + RouteConfiguration *RouteConfiguration `protobuf:"bytes,5,opt,name=route_configuration,json=routeConfiguration,proto3" json:"route_configuration,omitempty"` + // The key to match against. + Key *ScopedRouteConfiguration_Key `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *ScopedRouteConfiguration) Reset() { + *x = ScopedRouteConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration) ProtoMessage() {} + +func (x *ScopedRouteConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0} +} + +func (x *ScopedRouteConfiguration) GetOnDemand() bool { + if x != nil { + return x.OnDemand + } + return false +} + +func (x *ScopedRouteConfiguration) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRouteConfiguration) GetRouteConfigurationName() string { + if x != nil { + return x.RouteConfigurationName + } + return "" +} + +func (x *ScopedRouteConfiguration) GetRouteConfiguration() *RouteConfiguration { + if x != nil { + return x.RouteConfiguration + } + return nil +} + +func (x *ScopedRouteConfiguration) GetKey() *ScopedRouteConfiguration_Key { + if x != nil { + return x.Key + } + return nil +} + +// Specifies a key which is matched against the output of the +// :ref:`scope_key_builder` +// specified in the HttpConnectionManager. The matching is done per HTTP +// request and is dependent on the order of the fragments contained in the +// Key. +type ScopedRouteConfiguration_Key struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. 
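
Editorial sketch (not part of the vendored diff): the generated struct above maps directly onto the SRDS example in its doc comment. Assuming an import alias routev3 for github.com/envoyproxy/go-control-plane/envoy/config/route/v3, the "route-scope1" YAML resource could be built roughly like this; the variable name scope is illustrative only.

    // Illustrative sketch: the "route-scope1" resource from the doc comment above.
    // Assumes: import routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
    scope := &routev3.ScopedRouteConfiguration{
        Name:                   "route-scope1",
        RouteConfigurationName: "route-config1",
        Key: &routev3.ScopedRouteConfiguration_Key{
            Fragments: []*routev3.ScopedRouteConfiguration_Key_Fragment{
                {Type: &routev3.ScopedRouteConfiguration_Key_Fragment_StringKey{StringKey: "172.10.10.20"}},
            },
        },
    }
    _ = scope // handed to whatever SRDS/management-server code serves this resource
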
+ Fragments []*ScopedRouteConfiguration_Key_Fragment `protobuf:"bytes,1,rep,name=fragments,proto3" json:"fragments,omitempty"` +} + +func (x *ScopedRouteConfiguration_Key) Reset() { + *x = ScopedRouteConfiguration_Key{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration_Key) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration_Key) ProtoMessage() {} + +func (x *ScopedRouteConfiguration_Key) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration_Key.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration_Key) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ScopedRouteConfiguration_Key) GetFragments() []*ScopedRouteConfiguration_Key_Fragment { + if x != nil { + return x.Fragments + } + return nil +} + +type ScopedRouteConfiguration_Key_Fragment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *ScopedRouteConfiguration_Key_Fragment_StringKey + Type isScopedRouteConfiguration_Key_Fragment_Type `protobuf_oneof:"type"` +} + +func (x *ScopedRouteConfiguration_Key_Fragment) Reset() { + *x = ScopedRouteConfiguration_Key_Fragment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration_Key_Fragment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration_Key_Fragment) ProtoMessage() {} + +func (x *ScopedRouteConfiguration_Key_Fragment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration_Key_Fragment.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration_Key_Fragment) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (m *ScopedRouteConfiguration_Key_Fragment) GetType() isScopedRouteConfiguration_Key_Fragment_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ScopedRouteConfiguration_Key_Fragment) GetStringKey() string { + if x, ok := x.GetType().(*ScopedRouteConfiguration_Key_Fragment_StringKey); ok { + return x.StringKey + } + return "" +} + +type isScopedRouteConfiguration_Key_Fragment_Type interface { + isScopedRouteConfiguration_Key_Fragment_Type() +} + +type ScopedRouteConfiguration_Key_Fragment_StringKey struct { + // A string to match against. 
+ StringKey string `protobuf:"bytes,1,opt,name=string_key,json=stringKey,proto3,oneof"` +} + +func (*ScopedRouteConfiguration_Key_Fragment_StringKey) isScopedRouteConfiguration_Key_Fragment_Type() { +} + +var File_envoy_config_route_v3_scoped_route_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_scoped_route_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xaa, 0x05, 0x0a, 0x18, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x6f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x18, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x14, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0e, + 0x12, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x70, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x14, + 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0e, 0x12, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x03, 0x4b, 0x65, + 0x79, 0x12, 0x64, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x73, 0x0a, 0x08, 0x46, 0x72, 0x61, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x65, 0x79, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, + 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x30, 0x9a, 0xc5, + 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x3a, 0x2c, + 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x87, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_scoped_route_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_scoped_route_proto_rawDescData = file_envoy_config_route_v3_scoped_route_proto_rawDesc +) + +func file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP() []byte { + 
file_envoy_config_route_v3_scoped_route_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_scoped_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_scoped_route_proto_rawDescData) + }) + return file_envoy_config_route_v3_scoped_route_proto_rawDescData +} + +var file_envoy_config_route_v3_scoped_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_route_v3_scoped_route_proto_goTypes = []interface{}{ + (*ScopedRouteConfiguration)(nil), // 0: envoy.config.route.v3.ScopedRouteConfiguration + (*ScopedRouteConfiguration_Key)(nil), // 1: envoy.config.route.v3.ScopedRouteConfiguration.Key + (*ScopedRouteConfiguration_Key_Fragment)(nil), // 2: envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment + (*RouteConfiguration)(nil), // 3: envoy.config.route.v3.RouteConfiguration +} +var file_envoy_config_route_v3_scoped_route_proto_depIdxs = []int32{ + 3, // 0: envoy.config.route.v3.ScopedRouteConfiguration.route_configuration:type_name -> envoy.config.route.v3.RouteConfiguration + 1, // 1: envoy.config.route.v3.ScopedRouteConfiguration.key:type_name -> envoy.config.route.v3.ScopedRouteConfiguration.Key + 2, // 2: envoy.config.route.v3.ScopedRouteConfiguration.Key.fragments:type_name -> envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_scoped_route_proto_init() } +func file_envoy_config_route_v3_scoped_route_proto_init() { + if File_envoy_config_route_v3_scoped_route_proto != nil { + return + } + file_envoy_config_route_v3_route_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_scoped_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration_Key); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration_Key_Fragment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ScopedRouteConfiguration_Key_Fragment_StringKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_scoped_route_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_scoped_route_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_scoped_route_proto_depIdxs, + MessageInfos: file_envoy_config_route_v3_scoped_route_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_scoped_route_proto = out.File + file_envoy_config_route_v3_scoped_route_proto_rawDesc = nil + 
file_envoy_config_route_v3_scoped_route_proto_goTypes = nil + file_envoy_config_route_v3_scoped_route_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go new file mode 100644 index 000000000..54c187ce1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go @@ -0,0 +1,505 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ScopedRouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRouteConfigurationMultiError, or nil if none found. +func (m *ScopedRouteConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for OnDemand + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRouteConfigurationValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for RouteConfigurationName + + if all { + switch v := interface{}(m.GetRouteConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetKey() == nil { + err := ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopedRouteConfigurationMultiError(errors) + } + + return nil +} + +// ScopedRouteConfigurationMultiError is an error wrapping multiple validation +// errors returned by ScopedRouteConfiguration.ValidateAll() if the designated +// constraints aren't met. +type ScopedRouteConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfigurationMultiError) AllErrors() []error { return m } + +// ScopedRouteConfigurationValidationError is the validation error returned by +// ScopedRouteConfiguration.Validate if the designated constraints aren't met. +type ScopedRouteConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfigurationValidationError) ErrorName() string { + return "ScopedRouteConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfigurationValidationError{} + +// Validate checks the field values on ScopedRouteConfiguration_Key with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfiguration_Key) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration_Key with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRouteConfiguration_KeyMultiError, or nil if none found. 
+func (m *ScopedRouteConfiguration_Key) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration_Key) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFragments()) < 1 { + err := ScopedRouteConfiguration_KeyValidationError{ + field: "Fragments", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFragments() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRouteConfiguration_KeyMultiError(errors) + } + + return nil +} + +// ScopedRouteConfiguration_KeyMultiError is an error wrapping multiple +// validation errors returned by ScopedRouteConfiguration_Key.ValidateAll() if +// the designated constraints aren't met. +type ScopedRouteConfiguration_KeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfiguration_KeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfiguration_KeyMultiError) AllErrors() []error { return m } + +// ScopedRouteConfiguration_KeyValidationError is the validation error returned +// by ScopedRouteConfiguration_Key.Validate if the designated constraints +// aren't met. +type ScopedRouteConfiguration_KeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfiguration_KeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfiguration_KeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfiguration_KeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfiguration_KeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRouteConfiguration_KeyValidationError) ErrorName() string { + return "ScopedRouteConfiguration_KeyValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfiguration_KeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration_Key.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfiguration_KeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfiguration_KeyValidationError{} + +// Validate checks the field values on ScopedRouteConfiguration_Key_Fragment +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ScopedRouteConfiguration_Key_Fragment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration_Key_Fragment +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ScopedRouteConfiguration_Key_FragmentMultiError, or nil if none found. +func (m *ScopedRouteConfiguration_Key_Fragment) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration_Key_Fragment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofTypePresent := false + switch v := m.Type.(type) { + case *ScopedRouteConfiguration_Key_Fragment_StringKey: + if v == nil { + err := ScopedRouteConfiguration_Key_FragmentValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + // no validation rules for StringKey + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := ScopedRouteConfiguration_Key_FragmentValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRouteConfiguration_Key_FragmentMultiError(errors) + } + + return nil +} + +// ScopedRouteConfiguration_Key_FragmentMultiError is an error wrapping +// multiple validation errors returned by +// ScopedRouteConfiguration_Key_Fragment.ValidateAll() if the designated +// constraints aren't met. +type ScopedRouteConfiguration_Key_FragmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfiguration_Key_FragmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfiguration_Key_FragmentMultiError) AllErrors() []error { return m } + +// ScopedRouteConfiguration_Key_FragmentValidationError is the validation error +// returned by ScopedRouteConfiguration_Key_Fragment.Validate if the +// designated constraints aren't met. +type ScopedRouteConfiguration_Key_FragmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
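
Brief usage sketch for the validators generated above: Validate returns only the first violation, while ValidateAll collects every violation in a MultiError slice. The scope variable and the errors/log imports below are assumptions for illustration, not part of the vendored code.

    // Sketch: collecting all violations via the generated ValidateAll.
    // Assumes: import ("errors"; "log") and a *routev3.ScopedRouteConfiguration named scope.
    if err := scope.ValidateAll(); err != nil {
        var multi routev3.ScopedRouteConfigurationMultiError
        if errors.As(err, &multi) {
            for _, violation := range multi.AllErrors() {
                log.Printf("scoped route config violation: %v", violation)
            }
        } else {
            log.Printf("scoped route config invalid: %v", err)
        }
    }
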
+func (e ScopedRouteConfiguration_Key_FragmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) ErrorName() string { + return "ScopedRouteConfiguration_Key_FragmentValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration_Key_Fragment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfiguration_Key_FragmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfiguration_Key_FragmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go new file mode 100644 index 000000000..1e6a7e81c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go @@ -0,0 +1,263 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRouteConfiguration_Key_Fragment_StringKey); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringKey) + copy(dAtA[i:], m.StringKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringKey))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ScopedRouteConfiguration_Key) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RouteConfiguration != nil { + size, err := m.RouteConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + 
dAtA[i] = 0x2a + } + if m.OnDemand { + i-- + if m.OnDemand { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RouteConfigurationName) > 0 { + i -= len(m.RouteConfigurationName) + copy(dAtA[i:], m.RouteConfigurationName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigurationName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringKey) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ScopedRouteConfiguration_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigurationName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemand { + n += 2 + } + if m.RouteConfiguration != nil { + l = m.RouteConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go new file mode 100644 index 000000000..284516c99 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go @@ -0,0 +1,1642 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
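
The MarshalVTStrict helpers shown above are only compiled when the vtprotobuf build tag is set, per the //go:build line at the top of that file. A minimal round-trip sketch follows; the data/decoded names and the proto import are assumptions for illustration.

    // Sketch: strict vtprotobuf encoding of a scope, decoded back with the standard runtime.
    // Assumes a build with -tags vtprotobuf and import proto "google.golang.org/protobuf/proto".
    data, err := scope.MarshalVTStrict()
    if err != nil {
        log.Fatalf("marshal: %v", err)
    }
    decoded := &routev3.ScopedRouteConfiguration{}
    if err := proto.Unmarshal(data, decoded); err != nil {
        log.Fatalf("unmarshal: %v", err)
    }
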
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Output format. All output is in the form of one or more :ref:`TraceWrapper +// ` messages. This enumeration indicates +// how those messages are written. Note that not all sinks support all output formats. See +// individual sink documentation for more information. +type OutputSink_Format int32 + +const ( + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + OutputSink_JSON_BODY_AS_BYTES OutputSink_Format = 0 + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + OutputSink_JSON_BODY_AS_STRING OutputSink_Format = 1 + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + OutputSink_PROTO_BINARY OutputSink_Format = 2 + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + OutputSink_PROTO_BINARY_LENGTH_DELIMITED OutputSink_Format = 3 + // Text proto format. + OutputSink_PROTO_TEXT OutputSink_Format = 4 +) + +// Enum value maps for OutputSink_Format. +var ( + OutputSink_Format_name = map[int32]string{ + 0: "JSON_BODY_AS_BYTES", + 1: "JSON_BODY_AS_STRING", + 2: "PROTO_BINARY", + 3: "PROTO_BINARY_LENGTH_DELIMITED", + 4: "PROTO_TEXT", + } + OutputSink_Format_value = map[string]int32{ + "JSON_BODY_AS_BYTES": 0, + "JSON_BODY_AS_STRING": 1, + "PROTO_BINARY": 2, + "PROTO_BINARY_LENGTH_DELIMITED": 3, + "PROTO_TEXT": 4, + } +) + +func (x OutputSink_Format) Enum() *OutputSink_Format { + p := new(OutputSink_Format) + *p = x + return p +} + +func (x OutputSink_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OutputSink_Format) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_tap_v3_common_proto_enumTypes[0].Descriptor() +} + +func (OutputSink_Format) Type() protoreflect.EnumType { + return &file_envoy_config_tap_v3_common_proto_enumTypes[0] +} + +func (x OutputSink_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OutputSink_Format.Descriptor instead. +func (OutputSink_Format) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{5, 0} +} + +// Tap configuration. +type TapConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. 
+ // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + // + // Deprecated: Marked as deprecated in envoy/config/tap/v3/common.proto. + MatchConfig *MatchPredicate `protobuf:"bytes,1,opt,name=match_config,json=matchConfig,proto3" json:"match_config,omitempty"` + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + Match *v3.MatchPredicate `protobuf:"bytes,4,opt,name=match,proto3" json:"match,omitempty"` + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig *OutputConfig `protobuf:"bytes,2,opt,name=output_config,json=outputConfig,proto3" json:"output_config,omitempty"` + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + TapEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,3,opt,name=tap_enabled,json=tapEnabled,proto3" json:"tap_enabled,omitempty"` +} + +func (x *TapConfig) Reset() { + *x = TapConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TapConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TapConfig) ProtoMessage() {} + +func (x *TapConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TapConfig.ProtoReflect.Descriptor instead. +func (*TapConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/tap/v3/common.proto. +func (x *TapConfig) GetMatchConfig() *MatchPredicate { + if x != nil { + return x.MatchConfig + } + return nil +} + +func (x *TapConfig) GetMatch() *v3.MatchPredicate { + if x != nil { + return x.Match + } + return nil +} + +func (x *TapConfig) GetOutputConfig() *OutputConfig { + if x != nil { + return x.OutputConfig + } + return nil +} + +func (x *TapConfig) GetTapEnabled() *v31.RuntimeFractionalPercent { + if x != nil { + return x.TapEnabled + } + return nil +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. 
+// [#next-free-field: 11] +type MatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *MatchPredicate_OrMatch + // *MatchPredicate_AndMatch + // *MatchPredicate_NotMatch + // *MatchPredicate_AnyMatch + // *MatchPredicate_HttpRequestHeadersMatch + // *MatchPredicate_HttpRequestTrailersMatch + // *MatchPredicate_HttpResponseHeadersMatch + // *MatchPredicate_HttpResponseTrailersMatch + // *MatchPredicate_HttpRequestGenericBodyMatch + // *MatchPredicate_HttpResponseGenericBodyMatch + Rule isMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *MatchPredicate) Reset() { + *x = MatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate) ProtoMessage() {} + +func (x *MatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate.ProtoReflect.Descriptor instead. +func (*MatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (m *MatchPredicate) GetRule() isMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *MatchPredicate) GetOrMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *MatchPredicate) GetAndMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *MatchPredicate) GetNotMatch() *MatchPredicate { + if x, ok := x.GetRule().(*MatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *MatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*MatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *MatchPredicate) GetHttpRequestHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestHeadersMatch); ok { + return x.HttpRequestHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestTrailersMatch); ok { + return x.HttpRequestTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseHeadersMatch); ok { + return x.HttpResponseHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseTrailersMatch); ok { + return x.HttpResponseTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + return x.HttpRequestGenericBodyMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + 
return x.HttpResponseGenericBodyMatch + } + return nil +} + +type isMatchPredicate_Rule interface { + isMatchPredicate_Rule() +} + +type MatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *MatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type MatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *MatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type MatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *MatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type MatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestHeadersMatch struct { + // HTTP request headers match configuration. + HttpRequestHeadersMatch *HttpHeadersMatch `protobuf:"bytes,5,opt,name=http_request_headers_match,json=httpRequestHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestTrailersMatch struct { + // HTTP request trailers match configuration. + HttpRequestTrailersMatch *HttpHeadersMatch `protobuf:"bytes,6,opt,name=http_request_trailers_match,json=httpRequestTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseHeadersMatch struct { + // HTTP response headers match configuration. + HttpResponseHeadersMatch *HttpHeadersMatch `protobuf:"bytes,7,opt,name=http_response_headers_match,json=httpResponseHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseTrailersMatch struct { + // HTTP response trailers match configuration. + HttpResponseTrailersMatch *HttpHeadersMatch `protobuf:"bytes,8,opt,name=http_response_trailers_match,json=httpResponseTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestGenericBodyMatch struct { + // HTTP request generic body match configuration. + HttpRequestGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,9,opt,name=http_request_generic_body_match,json=httpRequestGenericBodyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseGenericBodyMatch struct { + // HTTP response generic body match configuration. + HttpResponseGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,10,opt,name=http_response_generic_body_match,json=httpResponseGenericBodyMatch,proto3,oneof"` +} + +func (*MatchPredicate_OrMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AndMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_NotMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AnyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestGenericBodyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseGenericBodyMatch) isMatchPredicate_Rule() {} + +// HTTP headers match configuration. +type HttpHeadersMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // HTTP headers to match. 
+ Headers []*v32.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HttpHeadersMatch) Reset() { + *x = HttpHeadersMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpHeadersMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpHeadersMatch) ProtoMessage() {} + +func (x *HttpHeadersMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpHeadersMatch.ProtoReflect.Descriptor instead. +func (*HttpHeadersMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpHeadersMatch) GetHeaders() []*v32.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +type HttpGenericBodyMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + BytesLimit uint32 `protobuf:"varint,1,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` + // List of patterns to match. + Patterns []*HttpGenericBodyMatch_GenericTextMatch `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *HttpGenericBodyMatch) Reset() { + *x = HttpGenericBodyMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpGenericBodyMatch) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +func (x *HttpGenericBodyMatch) GetPatterns() []*HttpGenericBodyMatch_GenericTextMatch { + if x != nil { + return x.Patterns + } + return nil +} + +// Tap output configuration. 
+type OutputConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + Sinks []*OutputSink `protobuf:"bytes,1,rep,name=sinks,proto3" json:"sinks,omitempty"` + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + MaxBufferedRxBytes *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_buffered_rx_bytes,json=maxBufferedRxBytes,proto3" json:"max_buffered_rx_bytes,omitempty"` + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + MaxBufferedTxBytes *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_buffered_tx_bytes,json=maxBufferedTxBytes,proto3" json:"max_buffered_tx_bytes,omitempty"` + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + Streaming bool `protobuf:"varint,4,opt,name=streaming,proto3" json:"streaming,omitempty"` +} + +func (x *OutputConfig) Reset() { + *x = OutputConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputConfig) ProtoMessage() {} + +func (x *OutputConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputConfig.ProtoReflect.Descriptor instead. +func (*OutputConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{4} +} + +func (x *OutputConfig) GetSinks() []*OutputSink { + if x != nil { + return x.Sinks + } + return nil +} + +func (x *OutputConfig) GetMaxBufferedRxBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxBufferedRxBytes + } + return nil +} + +func (x *OutputConfig) GetMaxBufferedTxBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxBufferedTxBytes + } + return nil +} + +func (x *OutputConfig) GetStreaming() bool { + if x != nil { + return x.Streaming + } + return false +} + +// Tap output sink configuration. +// [#next-free-field: 7] +type OutputSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Sink output format. 
+ Format OutputSink_Format `protobuf:"varint,1,opt,name=format,proto3,enum=envoy.config.tap.v3.OutputSink_Format" json:"format,omitempty"` + // Types that are assignable to OutputSinkType: + // + // *OutputSink_StreamingAdmin + // *OutputSink_FilePerTap + // *OutputSink_StreamingGrpc + // *OutputSink_BufferedAdmin + // *OutputSink_CustomSink + OutputSinkType isOutputSink_OutputSinkType `protobuf_oneof:"output_sink_type"` +} + +func (x *OutputSink) Reset() { + *x = OutputSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputSink) ProtoMessage() {} + +func (x *OutputSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputSink.ProtoReflect.Descriptor instead. +func (*OutputSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{5} +} + +func (x *OutputSink) GetFormat() OutputSink_Format { + if x != nil { + return x.Format + } + return OutputSink_JSON_BODY_AS_BYTES +} + +func (m *OutputSink) GetOutputSinkType() isOutputSink_OutputSinkType { + if m != nil { + return m.OutputSinkType + } + return nil +} + +func (x *OutputSink) GetStreamingAdmin() *StreamingAdminSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_StreamingAdmin); ok { + return x.StreamingAdmin + } + return nil +} + +func (x *OutputSink) GetFilePerTap() *FilePerTapSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_FilePerTap); ok { + return x.FilePerTap + } + return nil +} + +func (x *OutputSink) GetStreamingGrpc() *StreamingGrpcSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_StreamingGrpc); ok { + return x.StreamingGrpc + } + return nil +} + +func (x *OutputSink) GetBufferedAdmin() *BufferedAdminSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_BufferedAdmin); ok { + return x.BufferedAdmin + } + return nil +} + +func (x *OutputSink) GetCustomSink() *v31.TypedExtensionConfig { + if x, ok := x.GetOutputSinkType().(*OutputSink_CustomSink); ok { + return x.CustomSink + } + return nil +} + +type isOutputSink_OutputSinkType interface { + isOutputSink_OutputSinkType() +} + +type OutputSink_StreamingAdmin struct { + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdmin *StreamingAdminSink `protobuf:"bytes,2,opt,name=streaming_admin,json=streamingAdmin,proto3,oneof"` +} + +type OutputSink_FilePerTap struct { + // Tap output will be written to a file per tap sink. + FilePerTap *FilePerTapSink `protobuf:"bytes,3,opt,name=file_per_tap,json=filePerTap,proto3,oneof"` +} + +type OutputSink_StreamingGrpc struct { + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. 
+ // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] + StreamingGrpc *StreamingGrpcSink `protobuf:"bytes,4,opt,name=streaming_grpc,json=streamingGrpc,proto3,oneof"` +} + +type OutputSink_BufferedAdmin struct { + // Tap output will be buffered in a single block before flushing to the :http:post:`/tap` admin endpoint + // + // .. attention:: + // + // It is only allowed to specify the buffered admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the buffered admin output type will fail. + BufferedAdmin *BufferedAdminSink `protobuf:"bytes,5,opt,name=buffered_admin,json=bufferedAdmin,proto3,oneof"` +} + +type OutputSink_CustomSink struct { + // Tap output filter will be defined by an extension type + CustomSink *v31.TypedExtensionConfig `protobuf:"bytes,6,opt,name=custom_sink,json=customSink,proto3,oneof"` +} + +func (*OutputSink_StreamingAdmin) isOutputSink_OutputSinkType() {} + +func (*OutputSink_FilePerTap) isOutputSink_OutputSinkType() {} + +func (*OutputSink_StreamingGrpc) isOutputSink_OutputSinkType() {} + +func (*OutputSink_BufferedAdmin) isOutputSink_OutputSinkType() {} + +func (*OutputSink_CustomSink) isOutputSink_OutputSinkType() {} + +// Streaming admin sink configuration. +type StreamingAdminSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StreamingAdminSink) Reset() { + *x = StreamingAdminSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingAdminSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingAdminSink) ProtoMessage() {} + +func (x *StreamingAdminSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingAdminSink.ProtoReflect.Descriptor instead. +func (*StreamingAdminSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{6} +} + +// BufferedAdminSink configures a tap output to collect traces without returning them until +// one of multiple criteria are satisfied. +// Similar to StreamingAdminSink, it is only allowed to specify the buffered admin output +// sink if the tap is being configured from the “/tap“ admin endpoint. +type BufferedAdminSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stop collecting traces when the specified number are collected. + // If other criteria for ending collection are reached first, this value will not be used. + MaxTraces uint64 `protobuf:"varint,1,opt,name=max_traces,json=maxTraces,proto3" json:"max_traces,omitempty"` + // Acts as a fallback to prevent the client from waiting for long periods of time. + // After timeout has occurred, a buffer flush will be triggered, returning the traces buffered so far. 
+ // This may result in returning fewer traces than were requested, and in the case that no traces are + // buffered during this time, no traces will be returned. + // Specifying 0 for the timeout value (or not specifying a value at all) indicates an infinite timeout. + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *BufferedAdminSink) Reset() { + *x = BufferedAdminSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BufferedAdminSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BufferedAdminSink) ProtoMessage() {} + +func (x *BufferedAdminSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BufferedAdminSink.ProtoReflect.Descriptor instead. +func (*BufferedAdminSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{7} +} + +func (x *BufferedAdminSink) GetMaxTraces() uint64 { + if x != nil { + return x.MaxTraces + } + return 0 +} + +func (x *BufferedAdminSink) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +// The file per tap sink outputs a discrete file for every tapped stream. +type FilePerTapSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path prefix. The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + PathPrefix string `protobuf:"bytes,1,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` +} + +func (x *FilePerTapSink) Reset() { + *x = FilePerTapSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilePerTapSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilePerTapSink) ProtoMessage() {} + +func (x *FilePerTapSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilePerTapSink.ProtoReflect.Descriptor instead. +func (*FilePerTapSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{8} +} + +func (x *FilePerTapSink) GetPathPrefix() string { + if x != nil { + return x.PathPrefix + } + return "" +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. +type StreamingGrpcSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Opaque identifier, that will be sent back to the streaming grpc server. + TapId string `protobuf:"bytes,1,opt,name=tap_id,json=tapId,proto3" json:"tap_id,omitempty"` + // The gRPC server that hosts the Tap Sink Service. 
+ GrpcService *v31.GrpcService `protobuf:"bytes,2,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` +} + +func (x *StreamingGrpcSink) Reset() { + *x = StreamingGrpcSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingGrpcSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingGrpcSink) ProtoMessage() {} + +func (x *StreamingGrpcSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingGrpcSink.ProtoReflect.Descriptor instead. +func (*StreamingGrpcSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{9} +} + +func (x *StreamingGrpcSink) GetTapId() string { + if x != nil { + return x.TapId + } + return "" +} + +func (x *StreamingGrpcSink) GetGrpcService() *v31.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +// A set of match configurations used for logical operations. +type MatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. + Rules []*MatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *MatchPredicate_MatchSet) Reset() { + *x = MatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate_MatchSet) ProtoMessage() {} + +func (x *MatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate_MatchSet.ProtoReflect.Descriptor instead. 
+func (*MatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *MatchPredicate_MatchSet) GetRules() []*MatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +type HttpGenericBodyMatch_GenericTextMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *HttpGenericBodyMatch_GenericTextMatch_StringMatch + // *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch + Rule isHttpGenericBodyMatch_GenericTextMatch_Rule `protobuf_oneof:"rule"` +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) Reset() { + *x = HttpGenericBodyMatch_GenericTextMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch_GenericTextMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch_GenericTextMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch_GenericTextMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch_GenericTextMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) GetRule() isHttpGenericBodyMatch_GenericTextMatch_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetStringMatch() string { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + return x.StringMatch + } + return "" +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetBinaryMatch() []byte { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + return x.BinaryMatch + } + return nil +} + +type isHttpGenericBodyMatch_GenericTextMatch_Rule interface { + isHttpGenericBodyMatch_GenericTextMatch_Rule() +} + +type HttpGenericBodyMatch_GenericTextMatch_StringMatch struct { + // Text string to be located in HTTP body. + StringMatch string `protobuf:"bytes,1,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type HttpGenericBodyMatch_GenericTextMatch_BinaryMatch struct { + // Sequence of bytes to be located in HTTP body. 
+ BinaryMatch []byte `protobuf:"bytes,2,opt,name=binary_match,json=binaryMatch,proto3,oneof"` +} + +func (*HttpGenericBodyMatch_GenericTextMatch_StringMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +func (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +var File_envoy_config_tap_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_config_tap_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x02, 0x0a, 0x09, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x53, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 
0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x0d, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, + 0x0a, 0x0b, 0x74, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x74, 0x61, 0x70, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, + 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xe6, 0x08, 0x0a, 0x0e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x49, + 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, + 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x4b, 0x0a, 0x09, 0x61, 0x6e, 0x64, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x42, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, 0x6e, + 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 
0x42, 0x07, 0xfa, + 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x64, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, + 0x17, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x66, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x66, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, + 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x68, 0x0a, 0x1c, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x19, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, + 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x73, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, + 0x6f, 
0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1c, 0x68, 0x74, + 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x89, 0x01, 0x0a, 0x08, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x22, 0x85, 0x01, 0x0a, 0x10, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x96, 0x02, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x60, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, + 0x70, 0x61, 0x74, 0x74, 0x65, 
0x72, 0x6e, 0x73, 0x1a, 0x7b, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xc0, 0x02, 0x0a, 0x0c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x92, 0x01, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x6d, 0x61, 0x78, + 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x65, 0x64, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x6d, 0x61, + 0x78, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x65, 0x64, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xaa, 0x05, 0x0a, 0x0a, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x48, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x52, 0x0a, 0x0f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, + 0x69, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x53, 0x69, 0x6e, 0x6b, + 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x12, 0x4f, + 0x0a, 0x0e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x70, 0x63, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x53, 0x69, 0x6e, 0x6b, 0x48, 0x00, + 0x52, 0x0d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x12, + 0x4f, 0x0a, 0x0e, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, + 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x48, + 0x00, 0x52, 0x0d, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x4d, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x69, 0x6e, 0x6b, 0x22, + 0x7e, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x4a, 0x53, 0x4f, + 0x4e, 0x5f, 0x42, 0x4f, 0x44, 0x59, 0x5f, 0x41, 0x53, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x00, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x42, 0x4f, 0x44, 0x59, 0x5f, 0x41, + 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0e, 0x0a, 0x0a, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x54, 0x45, 0x58, 0x54, 0x10, 0x04, 0x3a, + 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x17, 0x0a, 0x10, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x49, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, + 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x3a, 0x33, 
0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, + 0x22, 0x70, 0x0a, 0x11, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x26, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x20, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x12, 0x33, 0x0a, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x22, 0x6b, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, + 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x3a, 0x2f, + 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x53, 0x69, 0x6e, 0x6b, 0x22, + 0xae, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x70, 0x49, 0x64, 0x12, 0x4e, 0x0a, 0x0c, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x32, 0x9a, 0xc5, + 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x53, 0x69, 0x6e, 0x6b, + 0x42, 0x7c, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, + 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_tap_v3_common_proto_rawDescOnce sync.Once + file_envoy_config_tap_v3_common_proto_rawDescData = file_envoy_config_tap_v3_common_proto_rawDesc +) + +func file_envoy_config_tap_v3_common_proto_rawDescGZIP() []byte { + file_envoy_config_tap_v3_common_proto_rawDescOnce.Do(func() { + file_envoy_config_tap_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_tap_v3_common_proto_rawDescData) + }) + return file_envoy_config_tap_v3_common_proto_rawDescData +} + +var file_envoy_config_tap_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_tap_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_envoy_config_tap_v3_common_proto_goTypes = []interface{}{ + (OutputSink_Format)(0), // 0: envoy.config.tap.v3.OutputSink.Format + (*TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig + (*MatchPredicate)(nil), // 2: envoy.config.tap.v3.MatchPredicate + (*HttpHeadersMatch)(nil), // 3: envoy.config.tap.v3.HttpHeadersMatch + (*HttpGenericBodyMatch)(nil), // 4: envoy.config.tap.v3.HttpGenericBodyMatch + (*OutputConfig)(nil), // 5: envoy.config.tap.v3.OutputConfig + (*OutputSink)(nil), // 6: envoy.config.tap.v3.OutputSink + (*StreamingAdminSink)(nil), // 7: envoy.config.tap.v3.StreamingAdminSink + (*BufferedAdminSink)(nil), // 8: envoy.config.tap.v3.BufferedAdminSink + (*FilePerTapSink)(nil), // 9: envoy.config.tap.v3.FilePerTapSink + (*StreamingGrpcSink)(nil), // 10: envoy.config.tap.v3.StreamingGrpcSink + (*MatchPredicate_MatchSet)(nil), // 11: envoy.config.tap.v3.MatchPredicate.MatchSet + (*HttpGenericBodyMatch_GenericTextMatch)(nil), // 12: envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch + (*v3.MatchPredicate)(nil), // 13: envoy.config.common.matcher.v3.MatchPredicate + (*v31.RuntimeFractionalPercent)(nil), // 14: envoy.config.core.v3.RuntimeFractionalPercent + (*v32.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*wrapperspb.UInt32Value)(nil), // 16: google.protobuf.UInt32Value + (*v31.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*v31.GrpcService)(nil), // 19: envoy.config.core.v3.GrpcService +} +var file_envoy_config_tap_v3_common_proto_depIdxs = []int32{ + 2, // 0: envoy.config.tap.v3.TapConfig.match_config:type_name -> envoy.config.tap.v3.MatchPredicate + 13, // 1: envoy.config.tap.v3.TapConfig.match:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 5, // 2: envoy.config.tap.v3.TapConfig.output_config:type_name -> envoy.config.tap.v3.OutputConfig + 14, // 3: envoy.config.tap.v3.TapConfig.tap_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 11, // 4: envoy.config.tap.v3.MatchPredicate.or_match:type_name -> envoy.config.tap.v3.MatchPredicate.MatchSet + 11, // 5: envoy.config.tap.v3.MatchPredicate.and_match:type_name -> envoy.config.tap.v3.MatchPredicate.MatchSet + 2, // 6: envoy.config.tap.v3.MatchPredicate.not_match:type_name -> envoy.config.tap.v3.MatchPredicate + 3, // 7: envoy.config.tap.v3.MatchPredicate.http_request_headers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 8: envoy.config.tap.v3.MatchPredicate.http_request_trailers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 9: envoy.config.tap.v3.MatchPredicate.http_response_headers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 10: envoy.config.tap.v3.MatchPredicate.http_response_trailers_match:type_name -> 
envoy.config.tap.v3.HttpHeadersMatch + 4, // 11: envoy.config.tap.v3.MatchPredicate.http_request_generic_body_match:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch + 4, // 12: envoy.config.tap.v3.MatchPredicate.http_response_generic_body_match:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch + 15, // 13: envoy.config.tap.v3.HttpHeadersMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 12, // 14: envoy.config.tap.v3.HttpGenericBodyMatch.patterns:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch + 6, // 15: envoy.config.tap.v3.OutputConfig.sinks:type_name -> envoy.config.tap.v3.OutputSink + 16, // 16: envoy.config.tap.v3.OutputConfig.max_buffered_rx_bytes:type_name -> google.protobuf.UInt32Value + 16, // 17: envoy.config.tap.v3.OutputConfig.max_buffered_tx_bytes:type_name -> google.protobuf.UInt32Value + 0, // 18: envoy.config.tap.v3.OutputSink.format:type_name -> envoy.config.tap.v3.OutputSink.Format + 7, // 19: envoy.config.tap.v3.OutputSink.streaming_admin:type_name -> envoy.config.tap.v3.StreamingAdminSink + 9, // 20: envoy.config.tap.v3.OutputSink.file_per_tap:type_name -> envoy.config.tap.v3.FilePerTapSink + 10, // 21: envoy.config.tap.v3.OutputSink.streaming_grpc:type_name -> envoy.config.tap.v3.StreamingGrpcSink + 8, // 22: envoy.config.tap.v3.OutputSink.buffered_admin:type_name -> envoy.config.tap.v3.BufferedAdminSink + 17, // 23: envoy.config.tap.v3.OutputSink.custom_sink:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 24: envoy.config.tap.v3.BufferedAdminSink.timeout:type_name -> google.protobuf.Duration + 19, // 25: envoy.config.tap.v3.StreamingGrpcSink.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 2, // 26: envoy.config.tap.v3.MatchPredicate.MatchSet.rules:type_name -> envoy.config.tap.v3.MatchPredicate + 27, // [27:27] is the sub-list for method output_type + 27, // [27:27] is the sub-list for method input_type + 27, // [27:27] is the sub-list for extension type_name + 27, // [27:27] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name +} + +func init() { file_envoy_config_tap_v3_common_proto_init() } +func file_envoy_config_tap_v3_common_proto_init() { + if File_envoy_config_tap_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_tap_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TapConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpHeadersMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputConfig); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingAdminSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BufferedAdminSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilePerTapSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingGrpcSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch_GenericTextMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MatchPredicate_OrMatch)(nil), + (*MatchPredicate_AndMatch)(nil), + (*MatchPredicate_NotMatch)(nil), + (*MatchPredicate_AnyMatch)(nil), + (*MatchPredicate_HttpRequestHeadersMatch)(nil), + (*MatchPredicate_HttpRequestTrailersMatch)(nil), + (*MatchPredicate_HttpResponseHeadersMatch)(nil), + (*MatchPredicate_HttpResponseTrailersMatch)(nil), + (*MatchPredicate_HttpRequestGenericBodyMatch)(nil), + (*MatchPredicate_HttpResponseGenericBodyMatch)(nil), + } + file_envoy_config_tap_v3_common_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OutputSink_StreamingAdmin)(nil), + (*OutputSink_FilePerTap)(nil), + (*OutputSink_StreamingGrpc)(nil), + (*OutputSink_BufferedAdmin)(nil), + (*OutputSink_CustomSink)(nil), + } + file_envoy_config_tap_v3_common_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*HttpGenericBodyMatch_GenericTextMatch_StringMatch)(nil), + (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_tap_v3_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_tap_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_config_tap_v3_common_proto_depIdxs, + EnumInfos: file_envoy_config_tap_v3_common_proto_enumTypes, + MessageInfos: 
file_envoy_config_tap_v3_common_proto_msgTypes, + }.Build() + File_envoy_config_tap_v3_common_proto = out.File + file_envoy_config_tap_v3_common_proto_rawDesc = nil + file_envoy_config_tap_v3_common_proto_goTypes = nil + file_envoy_config_tap_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go new file mode 100644 index 000000000..04df840e1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go @@ -0,0 +1,2419 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TapConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TapConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TapConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TapConfigMultiError, or nil +// if none found. 
+func (m *TapConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TapConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMatchConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatchConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOutputConfig() == nil { + err := TapConfigValidationError{ + field: "OutputConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTapEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTapEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return 
TapConfigMultiError(errors) + } + + return nil +} + +// TapConfigMultiError is an error wrapping multiple validation errors returned +// by TapConfig.ValidateAll() if the designated constraints aren't met. +type TapConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TapConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TapConfigMultiError) AllErrors() []error { return m } + +// TapConfigValidationError is the validation error returned by +// TapConfig.Validate if the designated constraints aren't met. +type TapConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TapConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TapConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TapConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TapConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TapConfigValidationError) ErrorName() string { return "TapConfigValidationError" } + +// Error satisfies the builtin error interface +func (e TapConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTapConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TapConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TapConfigValidationError{} + +// Validate checks the field values on MatchPredicate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MatchPredicateMultiError, +// or nil if none found. 
+func (m *MatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := MatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: 
"HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + 
+ default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatchPredicateMultiError(errors) + } + + return nil +} + +// MatchPredicateMultiError is an error wrapping multiple validation errors +// returned by MatchPredicate.ValidateAll() if the designated constraints +// aren't met. +type MatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicateMultiError) AllErrors() []error { return m } + +// MatchPredicateValidationError is the validation error returned by +// MatchPredicate.Validate if the designated constraints aren't met. +type MatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicateValidationError) ErrorName() string { return "MatchPredicateValidationError" } + +// Error satisfies the builtin error interface +func (e MatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicateValidationError{} + +// Validate checks the field values on HttpHeadersMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HttpHeadersMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpHeadersMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpHeadersMatchMultiError, or nil if none found. 
+func (m *HttpHeadersMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpHeadersMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpHeadersMatchMultiError(errors) + } + + return nil +} + +// HttpHeadersMatchMultiError is an error wrapping multiple validation errors +// returned by HttpHeadersMatch.ValidateAll() if the designated constraints +// aren't met. +type HttpHeadersMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpHeadersMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpHeadersMatchMultiError) AllErrors() []error { return m } + +// HttpHeadersMatchValidationError is the validation error returned by +// HttpHeadersMatch.Validate if the designated constraints aren't met. +type HttpHeadersMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpHeadersMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpHeadersMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpHeadersMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpHeadersMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpHeadersMatchValidationError) ErrorName() string { return "HttpHeadersMatchValidationError" } + +// Error satisfies the builtin error interface +func (e HttpHeadersMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpHeadersMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpHeadersMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpHeadersMatchValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpGenericBodyMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesLimit + + if len(m.GetPatterns()) < 1 { + err := HttpGenericBodyMatchValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpGenericBodyMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatchMultiError is an error wrapping multiple validation +// errors returned by HttpGenericBodyMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatchValidationError is the validation error returned by +// HttpGenericBodyMatch.Validate if the designated constraints aren't met. +type HttpGenericBodyMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatchValidationError{} + +// Validate checks the field values on OutputConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OutputConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutputConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OutputConfigMultiError, or +// nil if none found. +func (m *OutputConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OutputConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetSinks()) != 1 { + err := OutputConfigValidationError{ + field: "Sinks", + reason: "value must contain exactly 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetSinks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxBufferedRxBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxBufferedRxBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxBufferedTxBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxBufferedTxBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Streaming + + if len(errors) > 0 { + return OutputConfigMultiError(errors) + } + + return nil +} + +// OutputConfigMultiError is an error wrapping multiple validation errors +// returned by OutputConfig.ValidateAll() if the designated constraints aren't met. +type OutputConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutputConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutputConfigMultiError) AllErrors() []error { return m } + +// OutputConfigValidationError is the validation error returned by +// OutputConfig.Validate if the designated constraints aren't met. +type OutputConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutputConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OutputConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutputConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutputConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutputConfigValidationError) ErrorName() string { return "OutputConfigValidationError" } + +// Error satisfies the builtin error interface +func (e OutputConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutputConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutputConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutputConfigValidationError{} + +// Validate checks the field values on OutputSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OutputSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutputSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OutputSinkMultiError, or +// nil if none found. 
+func (m *OutputSink) ValidateAll() error { + return m.validate(true) +} + +func (m *OutputSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := OutputSink_Format_name[int32(m.GetFormat())]; !ok { + err := OutputSinkValidationError{ + field: "Format", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofOutputSinkTypePresent := false + switch v := m.OutputSinkType.(type) { + case *OutputSink_StreamingAdmin: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetStreamingAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamingAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_FilePerTap: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetFilePerTap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilePerTap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_StreamingGrpc: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetStreamingGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamingGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_BufferedAdmin: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetBufferedAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBufferedAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_CustomSink: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetCustomSink()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomSink()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOutputSinkTypePresent { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return OutputSinkMultiError(errors) + } + + return nil +} + +// OutputSinkMultiError is an error wrapping multiple validation errors +// returned by OutputSink.ValidateAll() if the designated constraints aren't met. +type OutputSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutputSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutputSinkMultiError) AllErrors() []error { return m } + +// OutputSinkValidationError is the validation error returned by +// OutputSink.Validate if the designated constraints aren't met. +type OutputSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutputSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OutputSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutputSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutputSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutputSinkValidationError) ErrorName() string { return "OutputSinkValidationError" } + +// Error satisfies the builtin error interface +func (e OutputSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutputSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutputSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutputSinkValidationError{} + +// Validate checks the field values on StreamingAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *StreamingAdminSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StreamingAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StreamingAdminSinkMultiError, or nil if none found. +func (m *StreamingAdminSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StreamingAdminSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StreamingAdminSinkMultiError(errors) + } + + return nil +} + +// StreamingAdminSinkMultiError is an error wrapping multiple validation errors +// returned by StreamingAdminSink.ValidateAll() if the designated constraints +// aren't met. +type StreamingAdminSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StreamingAdminSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StreamingAdminSinkMultiError) AllErrors() []error { return m } + +// StreamingAdminSinkValidationError is the validation error returned by +// StreamingAdminSink.Validate if the designated constraints aren't met. +type StreamingAdminSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StreamingAdminSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StreamingAdminSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StreamingAdminSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StreamingAdminSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StreamingAdminSinkValidationError) ErrorName() string { + return "StreamingAdminSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e StreamingAdminSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStreamingAdminSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StreamingAdminSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StreamingAdminSinkValidationError{} + +// Validate checks the field values on BufferedAdminSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *BufferedAdminSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BufferedAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BufferedAdminSinkMultiError, or nil if none found. +func (m *BufferedAdminSink) ValidateAll() error { + return m.validate(true) +} + +func (m *BufferedAdminSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMaxTraces() <= 0 { + err := BufferedAdminSinkValidationError{ + field: "MaxTraces", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BufferedAdminSinkMultiError(errors) + } + + return nil +} + +// BufferedAdminSinkMultiError is an error wrapping multiple validation errors +// returned by BufferedAdminSink.ValidateAll() if the designated constraints +// aren't met. +type BufferedAdminSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BufferedAdminSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BufferedAdminSinkMultiError) AllErrors() []error { return m } + +// BufferedAdminSinkValidationError is the validation error returned by +// BufferedAdminSink.Validate if the designated constraints aren't met. +type BufferedAdminSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e BufferedAdminSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BufferedAdminSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BufferedAdminSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BufferedAdminSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BufferedAdminSinkValidationError) ErrorName() string { + return "BufferedAdminSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e BufferedAdminSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBufferedAdminSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BufferedAdminSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BufferedAdminSinkValidationError{} + +// Validate checks the field values on FilePerTapSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilePerTapSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilePerTapSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilePerTapSinkMultiError, +// or nil if none found. +func (m *FilePerTapSink) ValidateAll() error { + return m.validate(true) +} + +func (m *FilePerTapSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPathPrefix()) < 1 { + err := FilePerTapSinkValidationError{ + field: "PathPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FilePerTapSinkMultiError(errors) + } + + return nil +} + +// FilePerTapSinkMultiError is an error wrapping multiple validation errors +// returned by FilePerTapSink.ValidateAll() if the designated constraints +// aren't met. +type FilePerTapSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilePerTapSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilePerTapSinkMultiError) AllErrors() []error { return m } + +// FilePerTapSinkValidationError is the validation error returned by +// FilePerTapSink.Validate if the designated constraints aren't met. +type FilePerTapSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilePerTapSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilePerTapSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilePerTapSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilePerTapSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilePerTapSinkValidationError) ErrorName() string { return "FilePerTapSinkValidationError" } + +// Error satisfies the builtin error interface +func (e FilePerTapSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilePerTapSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilePerTapSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilePerTapSinkValidationError{} + +// Validate checks the field values on StreamingGrpcSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StreamingGrpcSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StreamingGrpcSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StreamingGrpcSinkMultiError, or nil if none found. +func (m *StreamingGrpcSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StreamingGrpcSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TapId + + if m.GetGrpcService() == nil { + err := StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StreamingGrpcSinkMultiError(errors) + } + + return nil +} + +// StreamingGrpcSinkMultiError is an error wrapping multiple validation errors +// returned by StreamingGrpcSink.ValidateAll() if the designated constraints +// aren't met. +type StreamingGrpcSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StreamingGrpcSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StreamingGrpcSinkMultiError) AllErrors() []error { return m } + +// StreamingGrpcSinkValidationError is the validation error returned by +// StreamingGrpcSink.Validate if the designated constraints aren't met. +type StreamingGrpcSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e StreamingGrpcSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StreamingGrpcSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StreamingGrpcSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StreamingGrpcSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StreamingGrpcSinkValidationError) ErrorName() string { + return "StreamingGrpcSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e StreamingGrpcSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStreamingGrpcSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StreamingGrpcSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StreamingGrpcSinkValidationError{} + +// Validate checks the field values on MatchPredicate_MatchSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate_MatchSet with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MatchPredicate_MatchSetMultiError, or nil if none found. +func (m *MatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := MatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// MatchPredicate_MatchSetMultiError is an error wrapping multiple validation +// errors returned by MatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type MatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// MatchPredicate_MatchSetValidationError is the validation error returned by +// MatchPredicate_MatchSet.Validate if the designated constraints aren't met. +type MatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicate_MatchSetValidationError) ErrorName() string { + return "MatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e MatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicate_MatchSetValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpGenericBodyMatch_GenericTextMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatch_GenericTextMatchMultiError, or nil if none found. 
+func (m *HttpGenericBodyMatch_GenericTextMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if utf8.RuneCountInString(m.GetStringMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "StringMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if len(m.GetBinaryMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "BinaryMatch", + reason: "value length must be at least 1 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpGenericBodyMatch_GenericTextMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatch_GenericTextMatchMultiError is an error wrapping +// multiple validation errors returned by +// HttpGenericBodyMatch_GenericTextMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatch_GenericTextMatchValidationError is the validation error +// returned by HttpGenericBodyMatch_GenericTextMatch.Validate if the +// designated constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatch_GenericTextMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatch_GenericTextMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch_GenericTextMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatch_GenericTextMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatch_GenericTextMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go new file mode 100644 index 000000000..9cedeecd0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go @@ -0,0 +1,1591 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.TapEnabled != nil { + if vtmsg, ok := interface{}(m.TapEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutputConfig != nil { + size, err := m.OutputConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MatchConfig != nil { + size, err := m.MatchConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m 
*MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Streaming { + i-- + if m.Streaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxBufferedTxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxBufferedRxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Sinks) > 0 { + for iNdEx := len(m.Sinks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Sinks[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OutputSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OutputSinkType.(*OutputSink_CustomSink); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_BufferedAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingGrpc); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_FilePerTap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputSink_StreamingAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingAdmin != nil { + size, err := m.StreamingAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_FilePerTap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_FilePerTap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilePerTap != nil { + size, err := m.FilePerTap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_StreamingGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingGrpc != nil { + size, err := m.StreamingGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_BufferedAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_BufferedAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BufferedAdmin != nil { + size, err := m.BufferedAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_CustomSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_CustomSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomSink != nil { + if vtmsg, ok := interface{}(m.CustomSink).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomSink) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *StreamingAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *BufferedAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferedAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferedAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTraces != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTraces)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FilePerTapSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilePerTapSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilePerTapSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PathPrefix) > 0 { + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamingGrpcSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingGrpcSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingGrpcSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.TapId) > 0 { + i -= len(m.TapId) + copy(dAtA[i:], m.TapId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TapId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchConfig != nil { + l = m.MatchConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutputConfig != nil { + l = m.OutputConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapEnabled != nil { + if size, ok := interface{}(m.TapEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + 
l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OutputConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sinks) > 0 { + for _, e := range m.Sinks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxBufferedRxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBufferedTxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Streaming { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + if vtmsg, ok := m.OutputSinkType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink_StreamingAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingAdmin != nil { + l = m.StreamingAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_FilePerTap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilePerTap != nil { + l = m.FilePerTap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_StreamingGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingGrpc != nil { + l = m.StreamingGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_BufferedAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BufferedAdmin != nil { + l = m.BufferedAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_CustomSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomSink != nil { + if size, ok := interface{}(m.CustomSink).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomSink) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StreamingAdminSink) SizeVT() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *BufferedAdminSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTraces != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTraces)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilePerTapSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StreamingGrpcSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TapId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go new file mode 100644 index 000000000..1975c6f02 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go @@ -0,0 +1,290 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the Remote Configuration feature. +type DatadogRemoteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Frequency at which new configuration updates are queried. + // If no value is provided, the default value is delegated to the Datadog tracing library. 
+ PollingInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=polling_interval,json=pollingInterval,proto3" json:"polling_interval,omitempty"` +} + +func (x *DatadogRemoteConfig) Reset() { + *x = DatadogRemoteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatadogRemoteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatadogRemoteConfig) ProtoMessage() {} + +func (x *DatadogRemoteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatadogRemoteConfig.ProtoReflect.Descriptor instead. +func (*DatadogRemoteConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{0} +} + +func (x *DatadogRemoteConfig) GetPollingInterval() *durationpb.Duration { + if x != nil { + return x.PollingInterval + } + return nil +} + +// Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] +type DatadogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster to use for submitting traces to the Datadog agent. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // The name used for the service when traces are generated by envoy. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors + // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. + CollectorHostname string `protobuf:"bytes,3,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // Enables and configures remote configuration. + // Remote Configuration allows to configure the tracer from Datadog's user interface. + // This feature can drastically increase the number of connections to the Datadog Agent. + // Each tracer regularly polls for configuration updates, and the number of tracers is the product + // of the number of listeners and worker threads. + RemoteConfig *DatadogRemoteConfig `protobuf:"bytes,4,opt,name=remote_config,json=remoteConfig,proto3" json:"remote_config,omitempty"` +} + +func (x *DatadogConfig) Reset() { + *x = DatadogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatadogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatadogConfig) ProtoMessage() {} + +func (x *DatadogConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatadogConfig.ProtoReflect.Descriptor instead. 
+func (*DatadogConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{1} +} + +func (x *DatadogConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +func (x *DatadogConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *DatadogConfig) GetCollectorHostname() string { + if x != nil { + return x.CollectorHostname + } + return "" +} + +func (x *DatadogConfig) GetRemoteConfig() *DatadogRemoteConfig { + if x != nil { + return x.RemoteConfig + } + return nil +} + +var File_envoy_config_trace_v3_datadog_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, + 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9d, 0x02, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_datadog_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_datadog_proto_rawDescData = file_envoy_config_trace_v3_datadog_proto_rawDesc +) + +func file_envoy_config_trace_v3_datadog_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_datadog_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_datadog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_datadog_proto_rawDescData) + }) + return file_envoy_config_trace_v3_datadog_proto_rawDescData +} + +var file_envoy_config_trace_v3_datadog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_datadog_proto_goTypes = []interface{}{ + (*DatadogRemoteConfig)(nil), // 0: envoy.config.trace.v3.DatadogRemoteConfig + (*DatadogConfig)(nil), // 1: envoy.config.trace.v3.DatadogConfig + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration +} +var file_envoy_config_trace_v3_datadog_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.DatadogRemoteConfig.polling_interval:type_name -> google.protobuf.Duration + 0, // 1: envoy.config.trace.v3.DatadogConfig.remote_config:type_name -> envoy.config.trace.v3.DatadogRemoteConfig + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_datadog_proto_init() } +func 
file_envoy_config_trace_v3_datadog_proto_init() { + if File_envoy_config_trace_v3_datadog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_datadog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatadogRemoteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_datadog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatadogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_datadog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_datadog_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_datadog_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_datadog_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_datadog_proto = out.File + file_envoy_config_trace_v3_datadog_proto_rawDesc = nil + file_envoy_config_trace_v3_datadog_proto_goTypes = nil + file_envoy_config_trace_v3_datadog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go new file mode 100644 index 000000000..43ac4c898 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go @@ -0,0 +1,321 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DatadogRemoteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DatadogRemoteConfigMultiError, or nil if none found. 
+func (m *DatadogRemoteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DatadogRemoteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPollingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPollingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatadogRemoteConfigMultiError(errors) + } + + return nil +} + +// DatadogRemoteConfigMultiError is an error wrapping multiple validation +// errors returned by DatadogRemoteConfig.ValidateAll() if the designated +// constraints aren't met. +type DatadogRemoteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatadogRemoteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatadogRemoteConfigMultiError) AllErrors() []error { return m } + +// DatadogRemoteConfigValidationError is the validation error returned by +// DatadogRemoteConfig.Validate if the designated constraints aren't met. +type DatadogRemoteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatadogRemoteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatadogRemoteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatadogRemoteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatadogRemoteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DatadogRemoteConfigValidationError) ErrorName() string { + return "DatadogRemoteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e DatadogRemoteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatadogRemoteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatadogRemoteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatadogRemoteConfigValidationError{} + +// Validate checks the field values on DatadogConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *DatadogConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatadogConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DatadogConfigMultiError, or +// nil if none found. +func (m *DatadogConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DatadogConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := DatadogConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetServiceName()) < 1 { + err := DatadogConfigValidationError{ + field: "ServiceName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for CollectorHostname + + if all { + switch v := interface{}(m.GetRemoteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatadogConfigMultiError(errors) + } + + return nil +} + +// DatadogConfigMultiError is an error wrapping multiple validation errors +// returned by DatadogConfig.ValidateAll() if the designated constraints +// aren't met. +type DatadogConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatadogConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatadogConfigMultiError) AllErrors() []error { return m } + +// DatadogConfigValidationError is the validation error returned by +// DatadogConfig.Validate if the designated constraints aren't met. +type DatadogConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatadogConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatadogConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatadogConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatadogConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DatadogConfigValidationError) ErrorName() string { return "DatadogConfigValidationError" } + +// Error satisfies the builtin error interface +func (e DatadogConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatadogConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatadogConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatadogConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go new file mode 100644 index 000000000..b4cede7f0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DatadogRemoteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogRemoteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogRemoteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PollingInterval != nil { + size, err := (*durationpb.Duration)(m.PollingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RemoteConfig != nil { + size, err := m.RemoteConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + 
dAtA[i] = 0x22 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogRemoteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PollingInterval != nil { + l = (*durationpb.Duration)(m.PollingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DatadogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RemoteConfig != nil { + l = m.RemoteConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go new file mode 100644 index 000000000..a3dd5bb95 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// DynamicOtConfig was used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +// [#not-implemented-hide:] +type DynamicOtConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Dynamic library implementing the `OpenTracing API + // `_. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. + Library string `protobuf:"bytes,1,opt,name=library,proto3" json:"library,omitempty"` + // The configuration to use when creating a tracer from the given dynamic + // library. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. 
+ Config *structpb.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *DynamicOtConfig) Reset() { + *x = DynamicOtConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicOtConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicOtConfig) ProtoMessage() {} + +func (x *DynamicOtConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicOtConfig.ProtoReflect.Descriptor instead. +func (*DynamicOtConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_dynamic_ot_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. +func (x *DynamicOtConfig) GetLibrary() string { + if x != nil { + return x.Library + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. +func (x *DynamicOtConfig) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + +var File_envoy_config_trace_v3_dynamic_ot_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, + 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x01, + 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x0a, 0x07, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x07, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 
0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x11, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb8, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, + 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6f, 0x74, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData = file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc +) + +func file_envoy_config_trace_v3_dynamic_ot_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData) + }) + return file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData +} + +var file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_dynamic_ot_proto_goTypes = []interface{}{ + (*DynamicOtConfig)(nil), // 0: envoy.config.trace.v3.DynamicOtConfig + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.DynamicOtConfig.config:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_dynamic_ot_proto_init() } +func file_envoy_config_trace_v3_dynamic_ot_proto_init() { + if File_envoy_config_trace_v3_dynamic_ot_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicOtConfig); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_dynamic_ot_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_dynamic_ot_proto = out.File + file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc = nil + file_envoy_config_trace_v3_dynamic_ot_proto_goTypes = nil + file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go new file mode 100644 index 000000000..2e9360027 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go @@ -0,0 +1,177 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DynamicOtConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DynamicOtConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DynamicOtConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DynamicOtConfigMultiError, or nil if none found. 
+func (m *DynamicOtConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicOtConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetLibrary()) < 1 { + err := DynamicOtConfigValidationError{ + field: "Library", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DynamicOtConfigMultiError(errors) + } + + return nil +} + +// DynamicOtConfigMultiError is an error wrapping multiple validation errors +// returned by DynamicOtConfig.ValidateAll() if the designated constraints +// aren't met. +type DynamicOtConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicOtConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicOtConfigMultiError) AllErrors() []error { return m } + +// DynamicOtConfigValidationError is the validation error returned by +// DynamicOtConfig.Validate if the designated constraints aren't met. +type DynamicOtConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicOtConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicOtConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicOtConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicOtConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicOtConfigValidationError) ErrorName() string { return "DynamicOtConfigValidationError" } + +// Error satisfies the builtin error interface +func (e DynamicOtConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicOtConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicOtConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicOtConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go new file mode 100644 index 000000000..594ee8461 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DynamicOtConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicOtConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicOtConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Library) > 0 { + i -= len(m.Library) + copy(dAtA[i:], m.Library) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Library))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicOtConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Library) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go new file mode 100644 index 000000000..98a8a86f1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. +// +// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one +// supported. +// +// .. attention:: +// +// Use of this message type has been deprecated in favor of direct use of +// :ref:`Tracing.Http `. +type Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provides configuration for the HTTP tracer. + Http *Tracing_Http `protobuf:"bytes,1,opt,name=http,proto3" json:"http,omitempty"` +} + +func (x *Tracing) Reset() { + *x = Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing) ProtoMessage() {} + +func (x *Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing.ProtoReflect.Descriptor instead. +func (*Tracing) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP(), []int{0} +} + +func (x *Tracing) GetHttp() *Tracing_Http { + if x != nil { + return x.Http + } + return nil +} + +// Configuration for an HTTP tracer provider used by Envoy. +// +// The configuration is defined by the +// :ref:`HttpConnectionManager.Tracing ` +// :ref:`provider ` +// field. +type Tracing_Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. + // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Trace driver specific configuration which must be set according to the driver being instantiated. 
+ // [#extension-category: envoy.tracers] + // + // Types that are assignable to ConfigType: + // + // *Tracing_Http_TypedConfig + ConfigType isTracing_Http_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *Tracing_Http) Reset() { + *x = Tracing_Http{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing_Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing_Http) ProtoMessage() {} + +func (x *Tracing_Http) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing_Http.ProtoReflect.Descriptor instead. +func (*Tracing_Http) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Tracing_Http) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Tracing_Http) GetConfigType() isTracing_Http_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *Tracing_Http) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*Tracing_Http_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isTracing_Http_ConfigType interface { + isTracing_Http_ConfigType() +} + +type Tracing_Http_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*Tracing_Http_TypedConfig) isTracing_Http_ConfigType() {} + +var File_envoy_config_trace_v3_http_tracer_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_http_tracer_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x12, 0x37, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x1a, 0xa6, 0x01, 0x0a, 0x04, + 0x48, 0x74, 0x74, 0x70, 0x12, 0x1b, 
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, + 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x86, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_http_tracer_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_http_tracer_proto_rawDescData = file_envoy_config_trace_v3_http_tracer_proto_rawDesc +) + +func file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_http_tracer_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_http_tracer_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_http_tracer_proto_rawDescData) + }) + return file_envoy_config_trace_v3_http_tracer_proto_rawDescData +} + +var file_envoy_config_trace_v3_http_tracer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_http_tracer_proto_goTypes = []interface{}{ + (*Tracing)(nil), // 0: envoy.config.trace.v3.Tracing + (*Tracing_Http)(nil), // 1: envoy.config.trace.v3.Tracing.Http + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_envoy_config_trace_v3_http_tracer_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.Tracing.http:type_name -> envoy.config.trace.v3.Tracing.Http + 2, // 1: envoy.config.trace.v3.Tracing.Http.typed_config:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { 
file_envoy_config_trace_v3_http_tracer_proto_init() } +func file_envoy_config_trace_v3_http_tracer_proto_init() { + if File_envoy_config_trace_v3_http_tracer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing_Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Tracing_Http_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_http_tracer_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_http_tracer_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_http_tracer_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_http_tracer_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_http_tracer_proto = out.File + file_envoy_config_trace_v3_http_tracer_proto_rawDesc = nil + file_envoy_config_trace_v3_http_tracer_proto_goTypes = nil + file_envoy_config_trace_v3_http_tracer_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go new file mode 100644 index 000000000..60cec43ae --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go @@ -0,0 +1,320 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TracingMultiError, or nil if none found. 
+func (m *Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TracingMultiError(errors) + } + + return nil +} + +// TracingMultiError is an error wrapping multiple validation errors returned +// by Tracing.ValidateAll() if the designated constraints aren't met. +type TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TracingMultiError) AllErrors() []error { return m } + +// TracingValidationError is the validation error returned by Tracing.Validate +// if the designated constraints aren't met. +type TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TracingValidationError) ErrorName() string { return "TracingValidationError" } + +// Error satisfies the builtin error interface +func (e TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TracingValidationError{} + +// Validate checks the field values on Tracing_Http with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing_Http) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing_Http with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Tracing_HttpMultiError, or +// nil if none found. 
+func (m *Tracing_Http) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing_Http) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Tracing_HttpValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *Tracing_Http_TypedConfig: + if v == nil { + err := Tracing_HttpValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return Tracing_HttpMultiError(errors) + } + + return nil +} + +// Tracing_HttpMultiError is an error wrapping multiple validation errors +// returned by Tracing_Http.ValidateAll() if the designated constraints aren't met. +type Tracing_HttpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Tracing_HttpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Tracing_HttpMultiError) AllErrors() []error { return m } + +// Tracing_HttpValidationError is the validation error returned by +// Tracing_Http.Validate if the designated constraints aren't met. +type Tracing_HttpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Tracing_HttpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Tracing_HttpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Tracing_HttpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Tracing_HttpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Tracing_HttpValidationError) ErrorName() string { return "Tracing_HttpValidationError" } + +// Error satisfies the builtin error interface +func (e Tracing_HttpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing_Http.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Tracing_HttpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Tracing_HttpValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go new file mode 100644 index 000000000..756a6de2c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go @@ -0,0 +1,178 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Tracing_Http) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing_Http) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Tracing_Http_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http != nil { + size, err := m.Http.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing_Http_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http != nil { + l = m.Http.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go new file mode 100644 index 000000000..e7eeb33ef --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available propagation modes +type LightstepConfig_PropagationMode int32 + +const ( + // Propagate trace context in the single header x-ot-span-context. + LightstepConfig_ENVOY LightstepConfig_PropagationMode = 0 + // Propagate trace context using LightStep's native format. + LightstepConfig_LIGHTSTEP LightstepConfig_PropagationMode = 1 + // Propagate trace context using the b3 format. + LightstepConfig_B3 LightstepConfig_PropagationMode = 2 + // Propagation trace context using the w3 trace-context standard. + LightstepConfig_TRACE_CONTEXT LightstepConfig_PropagationMode = 3 +) + +// Enum value maps for LightstepConfig_PropagationMode. 
+var ( + LightstepConfig_PropagationMode_name = map[int32]string{ + 0: "ENVOY", + 1: "LIGHTSTEP", + 2: "B3", + 3: "TRACE_CONTEXT", + } + LightstepConfig_PropagationMode_value = map[string]int32{ + "ENVOY": 0, + "LIGHTSTEP": 1, + "B3": 2, + "TRACE_CONTEXT": 3, + } +) + +func (x LightstepConfig_PropagationMode) Enum() *LightstepConfig_PropagationMode { + p := new(LightstepConfig_PropagationMode) + *p = x + return p +} + +func (x LightstepConfig_PropagationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LightstepConfig_PropagationMode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_lightstep_proto_enumTypes[0].Descriptor() +} + +func (LightstepConfig_PropagationMode) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_lightstep_proto_enumTypes[0] +} + +func (x LightstepConfig_PropagationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LightstepConfig_PropagationMode.Descriptor instead. +func (LightstepConfig_PropagationMode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] +// [#not-implemented-hide:] +type LightstepConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster manager cluster that hosts the LightStep collectors. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // File containing the access token to the `LightStep + // `_ API. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/lightstep.proto. + AccessTokenFile string `protobuf:"bytes,2,opt,name=access_token_file,json=accessTokenFile,proto3" json:"access_token_file,omitempty"` + // Access token to the `LightStep `_ API. + AccessToken *v3.DataSource `protobuf:"bytes,4,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"` + // Propagation modes to use by LightStep's tracer. + PropagationModes []LightstepConfig_PropagationMode `protobuf:"varint,3,rep,packed,name=propagation_modes,json=propagationModes,proto3,enum=envoy.config.trace.v3.LightstepConfig_PropagationMode" json:"propagation_modes,omitempty"` +} + +func (x *LightstepConfig) Reset() { + *x = LightstepConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_lightstep_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightstepConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightstepConfig) ProtoMessage() {} + +func (x *LightstepConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_lightstep_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightstepConfig.ProtoReflect.Descriptor instead. 
+func (*LightstepConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP(), []int{0} +} + +func (x *LightstepConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/lightstep.proto. +func (x *LightstepConfig) GetAccessTokenFile() string { + if x != nil { + return x.AccessTokenFile + } + return "" +} + +func (x *LightstepConfig) GetAccessToken() *v3.DataSource { + if x != nil { + return x.AccessToken + } + return nil +} + +func (x *LightstepConfig) GetPropagationModes() []LightstepConfig_PropagationMode { + if x != nil { + return x.PropagationModes + } + return nil +} + +var File_envoy_config_trace_v3_lightstep_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_lightstep_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xaf, 0x03, 0x0a, 0x0f, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x11, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x46, 0x69, + 0x6c, 0x65, 0x12, 0x43, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x72, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, + 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, + 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x46, 0x0a, 0x0f, 0x50, + 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x49, 0x47, + 0x48, 0x54, 0x53, 0x54, 0x45, 0x50, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x02, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x03, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0xb7, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2c, 0x12, 0x2a, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x73, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x2e, 0x76, + 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_lightstep_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_lightstep_proto_rawDescData = file_envoy_config_trace_v3_lightstep_proto_rawDesc +) + +func file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_lightstep_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_lightstep_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_lightstep_proto_rawDescData) + }) + return file_envoy_config_trace_v3_lightstep_proto_rawDescData +} + +var file_envoy_config_trace_v3_lightstep_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_lightstep_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_lightstep_proto_goTypes = []interface{}{ + 
(LightstepConfig_PropagationMode)(0), // 0: envoy.config.trace.v3.LightstepConfig.PropagationMode + (*LightstepConfig)(nil), // 1: envoy.config.trace.v3.LightstepConfig + (*v3.DataSource)(nil), // 2: envoy.config.core.v3.DataSource +} +var file_envoy_config_trace_v3_lightstep_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.LightstepConfig.access_token:type_name -> envoy.config.core.v3.DataSource + 0, // 1: envoy.config.trace.v3.LightstepConfig.propagation_modes:type_name -> envoy.config.trace.v3.LightstepConfig.PropagationMode + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_lightstep_proto_init() } +func file_envoy_config_trace_v3_lightstep_proto_init() { + if File_envoy_config_trace_v3_lightstep_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_lightstep_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightstepConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_lightstep_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_lightstep_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_lightstep_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_lightstep_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_lightstep_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_lightstep_proto = out.File + file_envoy_config_trace_v3_lightstep_proto_rawDesc = nil + file_envoy_config_trace_v3_lightstep_proto_goTypes = nil + file_envoy_config_trace_v3_lightstep_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go new file mode 100644 index 000000000..d9ebdddca --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go @@ -0,0 +1,195 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LightstepConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *LightstepConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LightstepConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LightstepConfigMultiError, or nil if none found. +func (m *LightstepConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LightstepConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := LightstepConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AccessTokenFile + + if all { + switch v := interface{}(m.GetAccessToken()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAccessToken()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetPropagationModes() { + _, _ = idx, item + + if _, ok := LightstepConfig_PropagationMode_name[int32(item)]; !ok { + err := LightstepConfigValidationError{ + field: fmt.Sprintf("PropagationModes[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return LightstepConfigMultiError(errors) + } + + return nil +} + +// LightstepConfigMultiError is an error wrapping multiple validation errors +// returned by LightstepConfig.ValidateAll() if the designated constraints +// aren't met. +type LightstepConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LightstepConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LightstepConfigMultiError) AllErrors() []error { return m } + +// LightstepConfigValidationError is the validation error returned by +// LightstepConfig.Validate if the designated constraints aren't met. +type LightstepConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LightstepConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LightstepConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LightstepConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LightstepConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LightstepConfigValidationError) ErrorName() string { return "LightstepConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LightstepConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLightstepConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LightstepConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LightstepConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go new file mode 100644 index 000000000..c1f9240a6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go @@ -0,0 +1,145 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LightstepConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightstepConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LightstepConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessToken != nil { + if vtmsg, ok := interface{}(m.AccessToken).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessToken) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.PropagationModes) > 0 { + var pksize2 int + for _, num := range m.PropagationModes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.PropagationModes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1a + } + if len(m.AccessTokenFile) > 0 { + i -= len(m.AccessTokenFile) + copy(dAtA[i:], m.AccessTokenFile) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessTokenFile))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightstepConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AccessTokenFile) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PropagationModes) > 0 { + l = 0 + for _, e := range m.PropagationModes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.AccessToken != nil { + if size, ok := interface{}(m.AccessToken).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AccessToken) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go new file mode 100644 index 000000000..b7e7c69f6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go @@ -0,0 +1,489 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OpenCensusConfig_TraceContext int32 + +const ( + // No-op default, no trace context is utilized. + OpenCensusConfig_NONE OpenCensusConfig_TraceContext = 0 + // W3C Trace-Context format "traceparent:" header. + OpenCensusConfig_TRACE_CONTEXT OpenCensusConfig_TraceContext = 1 + // Binary "grpc-trace-bin:" header. + OpenCensusConfig_GRPC_TRACE_BIN OpenCensusConfig_TraceContext = 2 + // "X-Cloud-Trace-Context:" header. + OpenCensusConfig_CLOUD_TRACE_CONTEXT OpenCensusConfig_TraceContext = 3 + // X-B3-* headers. + OpenCensusConfig_B3 OpenCensusConfig_TraceContext = 4 +) + +// Enum value maps for OpenCensusConfig_TraceContext. 
+var ( + OpenCensusConfig_TraceContext_name = map[int32]string{ + 0: "NONE", + 1: "TRACE_CONTEXT", + 2: "GRPC_TRACE_BIN", + 3: "CLOUD_TRACE_CONTEXT", + 4: "B3", + } + OpenCensusConfig_TraceContext_value = map[string]int32{ + "NONE": 0, + "TRACE_CONTEXT": 1, + "GRPC_TRACE_BIN": 2, + "CLOUD_TRACE_CONTEXT": 3, + "B3": 4, + } +) + +func (x OpenCensusConfig_TraceContext) Enum() *OpenCensusConfig_TraceContext { + p := new(OpenCensusConfig_TraceContext) + *p = x + return p +} + +func (x OpenCensusConfig_TraceContext) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OpenCensusConfig_TraceContext) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_opencensus_proto_enumTypes[0].Descriptor() +} + +func (OpenCensusConfig_TraceContext) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_opencensus_proto_enumTypes[0] +} + +func (x OpenCensusConfig_TraceContext) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OpenCensusConfig_TraceContext.Descriptor instead. +func (OpenCensusConfig_TraceContext) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the OpenCensus tracer. +// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +type OpenCensusConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures tracing, e.g. the sampler, max number of annotations, etc. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + TraceConfig *v1.TraceConfig `protobuf:"bytes,1,opt,name=trace_config,json=traceConfig,proto3" json:"trace_config,omitempty"` + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StdoutExporterEnabled bool `protobuf:"varint,2,opt,name=stdout_exporter_enabled,json=stdoutExporterEnabled,proto3" json:"stdout_exporter_enabled,omitempty"` + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverExporterEnabled bool `protobuf:"varint,3,opt,name=stackdriver_exporter_enabled,json=stackdriverExporterEnabled,proto3" json:"stackdriver_exporter_enabled,omitempty"` + // The Cloud project_id to use for Stackdriver tracing. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverProjectId string `protobuf:"bytes,4,opt,name=stackdriver_project_id,json=stackdriverProjectId,proto3" json:"stackdriver_project_id,omitempty"` + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverAddress string `protobuf:"bytes,10,opt,name=stackdriver_address,json=stackdriverAddress,proto3" json:"stackdriver_address,omitempty"` + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. 
+ // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverGrpcService *v3.GrpcService `protobuf:"bytes,13,opt,name=stackdriver_grpc_service,json=stackdriverGrpcService,proto3" json:"stackdriver_grpc_service,omitempty"` + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin + // tracer `. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + ZipkinExporterEnabled bool `protobuf:"varint,5,opt,name=zipkin_exporter_enabled,json=zipkinExporterEnabled,proto3" json:"zipkin_exporter_enabled,omitempty"` + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is + // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer + // `. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + ZipkinUrl string `protobuf:"bytes,6,opt,name=zipkin_url,json=zipkinUrl,proto3" json:"zipkin_url,omitempty"` + // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or + // ocagent_grpc_service must also be set. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentExporterEnabled bool `protobuf:"varint,11,opt,name=ocagent_exporter_enabled,json=ocagentExporterEnabled,proto3" json:"ocagent_exporter_enabled,omitempty"` + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentAddress string `protobuf:"bytes,12,opt,name=ocagent_address,json=ocagentAddress,proto3" json:"ocagent_address,omitempty"` + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentGrpcService *v3.GrpcService `protobuf:"bytes,14,opt,name=ocagent_grpc_service,json=ocagentGrpcService,proto3" json:"ocagent_grpc_service,omitempty"` + // List of incoming trace context headers we will accept. First one found + // wins. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + IncomingTraceContext []OpenCensusConfig_TraceContext `protobuf:"varint,8,rep,packed,name=incoming_trace_context,json=incomingTraceContext,proto3,enum=envoy.config.trace.v3.OpenCensusConfig_TraceContext" json:"incoming_trace_context,omitempty"` + // List of outgoing trace context headers we will produce. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. 
+ OutgoingTraceContext []OpenCensusConfig_TraceContext `protobuf:"varint,9,rep,packed,name=outgoing_trace_context,json=outgoingTraceContext,proto3,enum=envoy.config.trace.v3.OpenCensusConfig_TraceContext" json:"outgoing_trace_context,omitempty"` +} + +func (x *OpenCensusConfig) Reset() { + *x = OpenCensusConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_opencensus_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenCensusConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenCensusConfig) ProtoMessage() {} + +func (x *OpenCensusConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_opencensus_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenCensusConfig.ProtoReflect.Descriptor instead. +func (*OpenCensusConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetTraceConfig() *v1.TraceConfig { + if x != nil { + return x.TraceConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStdoutExporterEnabled() bool { + if x != nil { + return x.StdoutExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverExporterEnabled() bool { + if x != nil { + return x.StackdriverExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverProjectId() string { + if x != nil { + return x.StackdriverProjectId + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverAddress() string { + if x != nil { + return x.StackdriverAddress + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverGrpcService() *v3.GrpcService { + if x != nil { + return x.StackdriverGrpcService + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetZipkinExporterEnabled() bool { + if x != nil { + return x.ZipkinExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetZipkinUrl() string { + if x != nil { + return x.ZipkinUrl + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOcagentExporterEnabled() bool { + if x != nil { + return x.OcagentExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOcagentAddress() string { + if x != nil { + return x.OcagentAddress + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. 
+func (x *OpenCensusConfig) GetOcagentGrpcService() *v3.GrpcService { + if x != nil { + return x.OcagentGrpcService + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetIncomingTraceContext() []OpenCensusConfig_TraceContext { + if x != nil { + return x.IncomingTraceContext + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOutgoingTraceContext() []OpenCensusConfig_TraceContext { + if x != nil { + return x.OutgoingTraceContext + } + return nil +} + +var File_envoy_config_trace_v3_opencensus_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_opencensus_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, + 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x0a, + 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, + 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x49, 0x0a, 0x17, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x15, 0x73, 0x74, 
0x64, 0x6f, 0x75, 0x74, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x53, 0x0a, 0x1c, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x1a, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, + 0x72, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x13, 0x73, 0x74, 0x61, + 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x73, 0x74, 0x61, 0x63, 0x6b, + 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x6e, 0x0a, + 0x18, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, + 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x17, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x15, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x0a, 0x7a, 0x69, 0x70, 0x6b, + 0x69, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x09, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x4b, 0x0a, 0x18, 0x6f, 0x63, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x16, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0f, 0x6f, 0x63, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0c, 
0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0e, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x66, 0x0a, 0x14, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, + 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x16, 0x69, + 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x7d, 0x0a, 0x16, 0x6f, 0x75, + 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x60, 0x0a, 0x0c, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x45, 0x58, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x54, + 0x52, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4c, + 0x4f, 0x55, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x04, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, + 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, + 
0x65, 0x72, 0x73, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x76, + 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_opencensus_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_opencensus_proto_rawDescData = file_envoy_config_trace_v3_opencensus_proto_rawDesc +) + +func file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_opencensus_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_opencensus_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_opencensus_proto_rawDescData) + }) + return file_envoy_config_trace_v3_opencensus_proto_rawDescData +} + +var file_envoy_config_trace_v3_opencensus_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_opencensus_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_opencensus_proto_goTypes = []interface{}{ + (OpenCensusConfig_TraceContext)(0), // 0: envoy.config.trace.v3.OpenCensusConfig.TraceContext + (*OpenCensusConfig)(nil), // 1: envoy.config.trace.v3.OpenCensusConfig + (*v1.TraceConfig)(nil), // 2: opencensus.proto.trace.v1.TraceConfig + (*v3.GrpcService)(nil), // 3: envoy.config.core.v3.GrpcService +} +var file_envoy_config_trace_v3_opencensus_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.OpenCensusConfig.trace_config:type_name -> opencensus.proto.trace.v1.TraceConfig + 3, // 1: envoy.config.trace.v3.OpenCensusConfig.stackdriver_grpc_service:type_name -> envoy.config.core.v3.GrpcService + 3, // 2: envoy.config.trace.v3.OpenCensusConfig.ocagent_grpc_service:type_name -> envoy.config.core.v3.GrpcService + 0, // 3: envoy.config.trace.v3.OpenCensusConfig.incoming_trace_context:type_name -> envoy.config.trace.v3.OpenCensusConfig.TraceContext + 0, // 4: envoy.config.trace.v3.OpenCensusConfig.outgoing_trace_context:type_name -> envoy.config.trace.v3.OpenCensusConfig.TraceContext + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_opencensus_proto_init() } +func file_envoy_config_trace_v3_opencensus_proto_init() { + if File_envoy_config_trace_v3_opencensus_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_opencensus_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenCensusConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_opencensus_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_opencensus_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_opencensus_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_opencensus_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_opencensus_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_opencensus_proto = out.File + file_envoy_config_trace_v3_opencensus_proto_rawDesc = nil + file_envoy_config_trace_v3_opencensus_proto_goTypes = nil + file_envoy_config_trace_v3_opencensus_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go new file mode 100644 index 000000000..4e8286181 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go @@ -0,0 +1,240 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OpenCensusConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OpenCensusConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OpenCensusConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OpenCensusConfigMultiError, or nil if none found. 
+func (m *OpenCensusConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OpenCensusConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTraceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StdoutExporterEnabled + + // no validation rules for StackdriverExporterEnabled + + // no validation rules for StackdriverProjectId + + // no validation rules for StackdriverAddress + + if all { + switch v := interface{}(m.GetStackdriverGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStackdriverGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ZipkinExporterEnabled + + // no validation rules for ZipkinUrl + + // no validation rules for OcagentExporterEnabled + + // no validation rules for OcagentAddress + + if all { + switch v := interface{}(m.GetOcagentGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcagentGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OpenCensusConfigMultiError(errors) + } + + return nil +} + +// OpenCensusConfigMultiError is an error wrapping multiple validation errors +// returned by OpenCensusConfig.ValidateAll() if the designated constraints +// aren't met. +type OpenCensusConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OpenCensusConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OpenCensusConfigMultiError) AllErrors() []error { return m } + +// OpenCensusConfigValidationError is the validation error returned by +// OpenCensusConfig.Validate if the designated constraints aren't met. +type OpenCensusConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OpenCensusConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OpenCensusConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OpenCensusConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OpenCensusConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OpenCensusConfigValidationError) ErrorName() string { return "OpenCensusConfigValidationError" } + +// Error satisfies the builtin error interface +func (e OpenCensusConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOpenCensusConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OpenCensusConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OpenCensusConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go new file mode 100644 index 000000000..66b08bf86 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go @@ -0,0 +1,311 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenCensusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenCensusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenCensusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcagentGrpcService != nil { + if vtmsg, ok := interface{}(m.OcagentGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcagentGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StackdriverGrpcService != nil { + if vtmsg, ok := interface{}(m.StackdriverGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StackdriverGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if len(m.OcagentAddress) > 0 { + i -= len(m.OcagentAddress) + copy(dAtA[i:], m.OcagentAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OcagentAddress))) + i-- + dAtA[i] = 0x62 + } + if m.OcagentExporterEnabled { + i-- + if m.OcagentExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.StackdriverAddress) > 0 { + i -= len(m.StackdriverAddress) + copy(dAtA[i:], m.StackdriverAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverAddress))) + i-- + dAtA[i] = 0x52 + } + if len(m.OutgoingTraceContext) > 0 { + var pksize2 int + for _, num := range m.OutgoingTraceContext { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.OutgoingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x4a + } + if len(m.IncomingTraceContext) > 0 { + var pksize4 int + for _, num := range m.IncomingTraceContext { + pksize4 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num1 := range m.IncomingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA[j3] = uint8(num) + j3++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x42 + } + if len(m.ZipkinUrl) > 0 { + i -= len(m.ZipkinUrl) + copy(dAtA[i:], m.ZipkinUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ZipkinUrl))) + i-- + dAtA[i] = 
0x32 + } + if m.ZipkinExporterEnabled { + i-- + if m.ZipkinExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.StackdriverProjectId) > 0 { + i -= len(m.StackdriverProjectId) + copy(dAtA[i:], m.StackdriverProjectId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverProjectId))) + i-- + dAtA[i] = 0x22 + } + if m.StackdriverExporterEnabled { + i-- + if m.StackdriverExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.StdoutExporterEnabled { + i-- + if m.StdoutExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TraceConfig != nil { + if vtmsg, ok := interface{}(m.TraceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TraceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenCensusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceConfig != nil { + if size, ok := interface{}(m.TraceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TraceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StdoutExporterEnabled { + n += 2 + } + if m.StackdriverExporterEnabled { + n += 2 + } + l = len(m.StackdriverProjectId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ZipkinExporterEnabled { + n += 2 + } + l = len(m.ZipkinUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.IncomingTraceContext) > 0 { + l = 0 + for _, e := range m.IncomingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.OutgoingTraceContext) > 0 { + l = 0 + for _, e := range m.OutgoingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + l = len(m.StackdriverAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentExporterEnabled { + n += 2 + } + l = len(m.OcagentAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StackdriverGrpcService != nil { + if size, ok := interface{}(m.StackdriverGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StackdriverGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentGrpcService != nil { + if size, ok := interface{}(m.OcagentGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcagentGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go new file mode 100644 index 000000000..a0087e258 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the OpenTelemetry tracer. +// +// [#extension: envoy.tracers.opentelemetry] +// +// [#next-free-field: 6] +type OpenTelemetryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that will receive OTLP traces. + // Note that the tracer drops traces if the server does not read data fast enough. + // This field can be left empty to disable reporting traces to the gRPC service. + // Only one of “grpc_service“, “http_service“ may be used. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + // The upstream HTTP cluster that will receive OTLP traces. + // This field can be left empty to disable reporting traces to the HTTP service. + // Only one of “grpc_service“, “http_service“ may be used. + // + // .. note:: + // + // Note: The ``request_headers_to_add`` property in the OTLP HTTP exporter service + // does not support the :ref:`format specifier ` as used for + // :ref:`HTTP access logging `. + // The values configured are added as HTTP headers on the OTLP export request + // without any formatting applied. + HttpService *v3.HttpService `protobuf:"bytes,3,opt,name=http_service,json=httpService,proto3" json:"http_service,omitempty"` + // The name for the service. This will be populated in the ResourceSpan Resource attributes. + // If it is not provided, it will default to "unknown_service:envoy". + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // An ordered list of resource detectors + // [#extension-category: envoy.tracers.opentelemetry.resource_detectors] + ResourceDetectors []*v3.TypedExtensionConfig `protobuf:"bytes,4,rep,name=resource_detectors,json=resourceDetectors,proto3" json:"resource_detectors,omitempty"` + // Specifies the sampler to be used by the OpenTelemetry tracer. + // The configured sampler implements the Sampler interface defined by the OpenTelemetry specification. + // This field can be left empty. In this case, the default Envoy sampling decision is used. 
+ // + // See: `OpenTelemetry sampler specification `_ + // [#extension-category: envoy.tracers.opentelemetry.samplers] + Sampler *v3.TypedExtensionConfig `protobuf:"bytes,5,opt,name=sampler,proto3" json:"sampler,omitempty"` +} + +func (x *OpenTelemetryConfig) Reset() { + *x = OpenTelemetryConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenTelemetryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenTelemetryConfig) ProtoMessage() {} + +func (x *OpenTelemetryConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenTelemetryConfig.ProtoReflect.Descriptor instead. +func (*OpenTelemetryConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opentelemetry_proto_rawDescGZIP(), []int{0} +} + +func (x *OpenTelemetryConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *OpenTelemetryConfig) GetHttpService() *v3.HttpService { + if x != nil { + return x.HttpService + } + return nil +} + +func (x *OpenTelemetryConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *OpenTelemetryConfig) GetResourceDetectors() []*v3.TypedExtensionConfig { + if x != nil { + return x.ResourceDetectors + } + return nil +} + +func (x *OpenTelemetryConfig) GetSampler() *v3.TypedExtensionConfig { + if x != nil { + return x.Sampler + } + return nil +} + +var File_envoy_config_trace_v3_opentelemetry_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x03, 0x0a, 
0x13, 0x4f, 0x70, + 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x5b, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x15, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x0f, 0x12, 0x0d, 0x6f, 0x74, 0x6c, 0x70, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, + 0x0a, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x15, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0f, 0x12, + 0x0d, 0x6f, 0x74, 0x6c, 0x70, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x0b, + 0x68, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x42, + 0x89, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x4f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_opentelemetry_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_opentelemetry_proto_rawDescData = 
file_envoy_config_trace_v3_opentelemetry_proto_rawDesc +) + +func file_envoy_config_trace_v3_opentelemetry_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_opentelemetry_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_opentelemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_opentelemetry_proto_rawDescData) + }) + return file_envoy_config_trace_v3_opentelemetry_proto_rawDescData +} + +var file_envoy_config_trace_v3_opentelemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_opentelemetry_proto_goTypes = []interface{}{ + (*OpenTelemetryConfig)(nil), // 0: envoy.config.trace.v3.OpenTelemetryConfig + (*v3.GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService + (*v3.HttpService)(nil), // 2: envoy.config.core.v3.HttpService + (*v3.TypedExtensionConfig)(nil), // 3: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_trace_v3_opentelemetry_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.OpenTelemetryConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 2, // 1: envoy.config.trace.v3.OpenTelemetryConfig.http_service:type_name -> envoy.config.core.v3.HttpService + 3, // 2: envoy.config.trace.v3.OpenTelemetryConfig.resource_detectors:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 3: envoy.config.trace.v3.OpenTelemetryConfig.sampler:type_name -> envoy.config.core.v3.TypedExtensionConfig + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_opentelemetry_proto_init() } +func file_envoy_config_trace_v3_opentelemetry_proto_init() { + if File_envoy_config_trace_v3_opentelemetry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenTelemetryConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_opentelemetry_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_opentelemetry_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_opentelemetry_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_opentelemetry_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_opentelemetry_proto = out.File + file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = nil + file_envoy_config_trace_v3_opentelemetry_proto_goTypes = nil + file_envoy_config_trace_v3_opentelemetry_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go new file mode 100644 index 000000000..101f73bbe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go @@ -0,0 +1,262 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OpenTelemetryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *OpenTelemetryConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OpenTelemetryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OpenTelemetryConfigMultiError, or nil if none found. +func (m *OpenTelemetryConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OpenTelemetryConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ServiceName + + for idx, item := range m.GetResourceDetectors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetSampler()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSampler()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OpenTelemetryConfigMultiError(errors) + } + + return nil +} + +// OpenTelemetryConfigMultiError is an error wrapping multiple validation +// errors returned by OpenTelemetryConfig.ValidateAll() if the designated +// constraints aren't met. +type OpenTelemetryConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OpenTelemetryConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OpenTelemetryConfigMultiError) AllErrors() []error { return m } + +// OpenTelemetryConfigValidationError is the validation error returned by +// OpenTelemetryConfig.Validate if the designated constraints aren't met. +type OpenTelemetryConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OpenTelemetryConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OpenTelemetryConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OpenTelemetryConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OpenTelemetryConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e OpenTelemetryConfigValidationError) ErrorName() string { + return "OpenTelemetryConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e OpenTelemetryConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOpenTelemetryConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OpenTelemetryConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OpenTelemetryConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go new file mode 100644 index 000000000..d6c628051 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go @@ -0,0 +1,206 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenTelemetryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenTelemetryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenTelemetryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Sampler != nil { + if vtmsg, ok := interface{}(m.Sampler).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Sampler) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ResourceDetectors) > 0 { + for iNdEx := len(m.ResourceDetectors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResourceDetectors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResourceDetectors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) 
+ } + i-- + dAtA[i] = 0x22 + } + } + if m.HttpService != nil { + if vtmsg, ok := interface{}(m.HttpService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenTelemetryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpService != nil { + if size, ok := interface{}(m.HttpService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceDetectors) > 0 { + for _, e := range m.ResourceDetectors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Sampler != nil { + if size, ok := interface{}(m.Sampler).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Sampler) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go new file mode 100644 index 000000000..662b1bea5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration structure. +type TraceServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that hosts the metrics service. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` +} + +func (x *TraceServiceConfig) Reset() { + *x = TraceServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceServiceConfig) ProtoMessage() {} + +func (x *TraceServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceServiceConfig.ProtoReflect.Descriptor instead. +func (*TraceServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_service_proto_rawDescGZIP(), []int{0} +} + +func (x *TraceServiceConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +var File_envoy_config_trace_v3_service_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_service_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x95, 0x01, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x83, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_service_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_service_proto_rawDescData = file_envoy_config_trace_v3_service_proto_rawDesc +) + +func file_envoy_config_trace_v3_service_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_service_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_service_proto_rawDescData) + }) + return file_envoy_config_trace_v3_service_proto_rawDescData +} + +var file_envoy_config_trace_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_service_proto_goTypes = []interface{}{ + (*TraceServiceConfig)(nil), // 0: envoy.config.trace.v3.TraceServiceConfig + (*v3.GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService +} +var file_envoy_config_trace_v3_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.TraceServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_service_proto_init() } +func file_envoy_config_trace_v3_service_proto_init() { + if File_envoy_config_trace_v3_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_service_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_service_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_service_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_service_proto = out.File + file_envoy_config_trace_v3_service_proto_rawDesc 
= nil + file_envoy_config_trace_v3_service_proto_goTypes = nil + file_envoy_config_trace_v3_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go new file mode 100644 index 000000000..87b74b554 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go @@ -0,0 +1,179 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TraceServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TraceServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TraceServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TraceServiceConfigMultiError, or nil if none found. +func (m *TraceServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TraceServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TraceServiceConfigMultiError(errors) + } + + return nil +} + +// TraceServiceConfigMultiError is an error wrapping multiple validation errors +// returned by TraceServiceConfig.ValidateAll() if the designated constraints +// aren't met. +type TraceServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TraceServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TraceServiceConfigMultiError) AllErrors() []error { return m } + +// TraceServiceConfigValidationError is the validation error returned by +// TraceServiceConfig.Validate if the designated constraints aren't met. +type TraceServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TraceServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TraceServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TraceServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TraceServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TraceServiceConfigValidationError) ErrorName() string { + return "TraceServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TraceServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTraceServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TraceServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TraceServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go new file mode 100644 index 000000000..71fddd389 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go new file mode 100644 index 000000000..948ea5f13 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go @@ -0,0 +1,341 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the +// provider of tracing, then +// :ref:`spawn_upstream_span ` +// in the tracing config must be set to true to get the correct topology and tracing data. Moreover, SkyWalking +// Tracer does not support SkyWalking extension header (“sw8-x“) temporarily. +// [#extension: envoy.tracers.skywalking] +type SkyWalkingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SkyWalking collector service. 
+ GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig,proto3" json:"client_config,omitempty"` +} + +func (x *SkyWalkingConfig) Reset() { + *x = SkyWalkingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SkyWalkingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkyWalkingConfig) ProtoMessage() {} + +func (x *SkyWalkingConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SkyWalkingConfig.ProtoReflect.Descriptor instead. +func (*SkyWalkingConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP(), []int{0} +} + +func (x *SkyWalkingConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *SkyWalkingConfig) GetClientConfig() *ClientConfig { + if x != nil { + return x.ClientConfig + } + return nil +} + +// Client config for SkyWalking tracer. +type ClientConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Service name for SkyWalking tracer. If this field is empty, then local service cluster name + // that configured by :ref:`Bootstrap node ` + // message's :ref:`cluster ` field or command line + // option :option:`--service-cluster` will be used. If both this field and local service cluster + // name are empty, “EnvoyProxy“ is used as the service name by default. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Service instance name for SkyWalking tracer. If this field is empty, then local service node + // that configured by :ref:`Bootstrap node ` + // message's :ref:`id ` field or command line option + // :option:`--service-node` will be used. If both this field and local service node are empty, + // “EnvoyProxy“ is used as the instance name by default. + InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure + // that monitoring application data can be trusted. In current version, Token is considered as a + // simple string. + // [#comment:TODO(wbpcode): Get backend token through the SDS API.] + // + // Types that are assignable to BackendTokenSpecifier: + // + // *ClientConfig_BackendToken + BackendTokenSpecifier isClientConfig_BackendTokenSpecifier `protobuf_oneof:"backend_token_specifier"` + // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. + // This field specifies the maximum number of segments that can be cached. If not specified, the + // default is 1024. 
+ MaxCacheSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_cache_size,json=maxCacheSize,proto3" json:"max_cache_size,omitempty"` +} + +func (x *ClientConfig) Reset() { + *x = ClientConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig) ProtoMessage() {} + +func (x *ClientConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *ClientConfig) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (m *ClientConfig) GetBackendTokenSpecifier() isClientConfig_BackendTokenSpecifier { + if m != nil { + return m.BackendTokenSpecifier + } + return nil +} + +func (x *ClientConfig) GetBackendToken() string { + if x, ok := x.GetBackendTokenSpecifier().(*ClientConfig_BackendToken); ok { + return x.BackendToken + } + return "" +} + +func (x *ClientConfig) GetMaxCacheSize() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxCacheSize + } + return nil +} + +type isClientConfig_BackendTokenSpecifier interface { + isClientConfig_BackendTokenSpecifier() +} + +type ClientConfig_BackendToken struct { + // Inline authentication token string. 
+ BackendToken string `protobuf:"bytes,3,opt,name=backend_token,json=backendToken,proto3,oneof"` +} + +func (*ClientConfig_BackendToken) isClientConfig_BackendTokenSpecifier() {} + +var File_envoy_config_trace_v3_skywalking_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_skywalking_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6b, 0x79, 0x77, 0x61, 0x6c, 0x6b, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xac, 0x01, 0x0a, 0x10, 0x53, 0x6b, 0x79, 0x57, 0x61, 0x6c, 0x6b, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0xe4, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x0d, 0x62, 
0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x42, 0x0a, 0x0e, 0x6d, 0x61, 0x78, + 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x19, 0x0a, + 0x17, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x73, 0x6b, 0x79, 0x77, + 0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x6b, 0x79, + 0x77, 0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_skywalking_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_skywalking_proto_rawDescData = file_envoy_config_trace_v3_skywalking_proto_rawDesc +) + +func file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_skywalking_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_skywalking_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_skywalking_proto_rawDescData) + }) + return file_envoy_config_trace_v3_skywalking_proto_rawDescData +} + +var file_envoy_config_trace_v3_skywalking_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_skywalking_proto_goTypes = []interface{}{ + (*SkyWalkingConfig)(nil), // 0: envoy.config.trace.v3.SkyWalkingConfig + (*ClientConfig)(nil), // 1: envoy.config.trace.v3.ClientConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value +} +var file_envoy_config_trace_v3_skywalking_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.SkyWalkingConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // 1: envoy.config.trace.v3.SkyWalkingConfig.client_config:type_name -> envoy.config.trace.v3.ClientConfig + 3, // 2: envoy.config.trace.v3.ClientConfig.max_cache_size:type_name -> google.protobuf.UInt32Value + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the 
sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_skywalking_proto_init() } +func file_envoy_config_trace_v3_skywalking_proto_init() { + if File_envoy_config_trace_v3_skywalking_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_skywalking_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SkyWalkingConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_skywalking_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_trace_v3_skywalking_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ClientConfig_BackendToken)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_skywalking_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_skywalking_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_skywalking_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_skywalking_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_skywalking_proto = out.File + file_envoy_config_trace_v3_skywalking_proto_rawDesc = nil + file_envoy_config_trace_v3_skywalking_proto_goTypes = nil + file_envoy_config_trace_v3_skywalking_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go new file mode 100644 index 000000000..559bdb493 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go @@ -0,0 +1,355 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SkyWalkingConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SkyWalkingConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SkyWalkingConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SkyWalkingConfigMultiError, or nil if none found. 
+func (m *SkyWalkingConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SkyWalkingConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetClientConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SkyWalkingConfigMultiError(errors) + } + + return nil +} + +// SkyWalkingConfigMultiError is an error wrapping multiple validation errors +// returned by SkyWalkingConfig.ValidateAll() if the designated constraints +// aren't met. +type SkyWalkingConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SkyWalkingConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SkyWalkingConfigMultiError) AllErrors() []error { return m } + +// SkyWalkingConfigValidationError is the validation error returned by +// SkyWalkingConfig.Validate if the designated constraints aren't met. +type SkyWalkingConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SkyWalkingConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SkyWalkingConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SkyWalkingConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SkyWalkingConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SkyWalkingConfigValidationError) ErrorName() string { return "SkyWalkingConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SkyWalkingConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSkyWalkingConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SkyWalkingConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SkyWalkingConfigValidationError{} + +// Validate checks the field values on ClientConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClientConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClientConfigMultiError, or +// nil if none found. +func (m *ClientConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceName + + // no validation rules for InstanceName + + if all { + switch v := interface{}(m.GetMaxCacheSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxCacheSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.BackendTokenSpecifier.(type) { + case *ClientConfig_BackendToken: + if v == nil { + err := ClientConfigValidationError{ + field: "BackendTokenSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for BackendToken + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ClientConfigMultiError(errors) + } + + return nil +} + +// ClientConfigMultiError is an error wrapping multiple validation errors +// returned by ClientConfig.ValidateAll() if the designated constraints aren't met. +type ClientConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfigMultiError) AllErrors() []error { return m } + +// ClientConfigValidationError is the validation error returned by +// ClientConfig.Validate if the designated constraints aren't met. 
+type ClientConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfigValidationError) ErrorName() string { return "ClientConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClientConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go new file mode 100644 index 000000000..8af59a37d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go @@ -0,0 +1,224 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SkyWalkingConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SkyWalkingConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SkyWalkingConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientConfig != nil { + size, err := m.ClientConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCacheSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxCacheSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.BackendTokenSpecifier.(*ClientConfig_BackendToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig_BackendToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_BackendToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BackendToken) + copy(dAtA[i:], m.BackendToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BackendToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) 
- i, nil +} +func (m *SkyWalkingConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientConfig != nil { + l = m.ClientConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.BackendTokenSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MaxCacheSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxCacheSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig_BackendToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BackendToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go new file mode 100644 index 000000000..94ded5e4a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/trace.proto + +package tracev3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_envoy_config_trace_v3_trace_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_trace_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6f, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x42, 0x79, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x50, 0x00, + 0x50, 0x01, 0x50, 0x02, 0x50, 0x03, 0x50, 0x04, 0x50, 0x05, 0x50, 0x06, 0x50, 0x07, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_config_trace_v3_trace_proto_goTypes = []interface{}{} +var file_envoy_config_trace_v3_trace_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method 
output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_trace_proto_init() } +func file_envoy_config_trace_v3_trace_proto_init() { + if File_envoy_config_trace_v3_trace_proto != nil { + return + } + file_envoy_config_trace_v3_datadog_proto_init() + file_envoy_config_trace_v3_dynamic_ot_proto_init() + file_envoy_config_trace_v3_http_tracer_proto_init() + file_envoy_config_trace_v3_lightstep_proto_init() + file_envoy_config_trace_v3_opencensus_proto_init() + file_envoy_config_trace_v3_opentelemetry_proto_init() + file_envoy_config_trace_v3_service_proto_init() + file_envoy_config_trace_v3_zipkin_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_trace_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_trace_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_trace_proto_depIdxs, + }.Build() + File_envoy_config_trace_v3_trace_proto = out.File + file_envoy_config_trace_v3_trace_proto_rawDesc = nil + file_envoy_config_trace_v3_trace_proto_goTypes = nil + file_envoy_config_trace_v3_trace_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go new file mode 100644 index 000000000..1797e4924 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/trace.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go new file mode 100644 index 000000000..c040e1e0e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go @@ -0,0 +1,310 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#extension: envoy.tracers.xray] +type XRayConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The UDP endpoint of the X-Ray Daemon where the spans will be sent. + // If this value is not set, the default value of 127.0.0.1:2000 will be used. + DaemonEndpoint *v3.SocketAddress `protobuf:"bytes,1,opt,name=daemon_endpoint,json=daemonEndpoint,proto3" json:"daemon_endpoint,omitempty"` + // The name of the X-Ray segment. + SegmentName string `protobuf:"bytes,2,opt,name=segment_name,json=segmentName,proto3" json:"segment_name,omitempty"` + // The location of a local custom sampling rules JSON file. + // For an example of the sampling rules see: + // `X-Ray SDK documentation + // `_ + SamplingRuleManifest *v3.DataSource `protobuf:"bytes,3,opt,name=sampling_rule_manifest,json=samplingRuleManifest,proto3" json:"sampling_rule_manifest,omitempty"` + // Optional custom fields to be added to each trace segment. + // see: `X-Ray Segment Document documentation + // `__ + SegmentFields *XRayConfig_SegmentFields `protobuf:"bytes,4,opt,name=segment_fields,json=segmentFields,proto3" json:"segment_fields,omitempty"` +} + +func (x *XRayConfig) Reset() { + *x = XRayConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XRayConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XRayConfig) ProtoMessage() {} + +func (x *XRayConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XRayConfig.ProtoReflect.Descriptor instead. +func (*XRayConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_xray_proto_rawDescGZIP(), []int{0} +} + +func (x *XRayConfig) GetDaemonEndpoint() *v3.SocketAddress { + if x != nil { + return x.DaemonEndpoint + } + return nil +} + +func (x *XRayConfig) GetSegmentName() string { + if x != nil { + return x.SegmentName + } + return "" +} + +func (x *XRayConfig) GetSamplingRuleManifest() *v3.DataSource { + if x != nil { + return x.SamplingRuleManifest + } + return nil +} + +func (x *XRayConfig) GetSegmentFields() *XRayConfig_SegmentFields { + if x != nil { + return x.SegmentFields + } + return nil +} + +type XRayConfig_SegmentFields struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". + Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"` + // AWS resource metadata dictionary. 
+ // See: `X-Ray Segment Document documentation `__ + Aws *structpb.Struct `protobuf:"bytes,2,opt,name=aws,proto3" json:"aws,omitempty"` +} + +func (x *XRayConfig_SegmentFields) Reset() { + *x = XRayConfig_SegmentFields{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XRayConfig_SegmentFields) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XRayConfig_SegmentFields) ProtoMessage() {} + +func (x *XRayConfig_SegmentFields) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XRayConfig_SegmentFields.ProtoReflect.Descriptor instead. +func (*XRayConfig_SegmentFields) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_xray_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *XRayConfig_SegmentFields) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *XRayConfig_SegmentFields) GetAws() *structpb.Struct { + if x != nil { + return x.Aws + } + return nil +} + +var File_envoy_config_trace_v3_xray_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_xray_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x78, 0x72, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb8, 0x03, 0x0a, 0x0a, 0x58, 0x52, 0x61, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 
0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x0c, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x56, 0x0a, 0x16, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x14, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x0e, 0x73, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x58, 0x52, 0x61, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x52, 0x0d, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x1a, 0x52, 0x0a, 0x0d, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x03, 0x61, 0x77, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x03, 0x61, 0x77, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x58, 0x52, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0xad, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x27, 0x12, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x78, 0x72, 0x61, 0x79, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x09, 0x58, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_xray_proto_rawDescOnce 
sync.Once + file_envoy_config_trace_v3_xray_proto_rawDescData = file_envoy_config_trace_v3_xray_proto_rawDesc +) + +func file_envoy_config_trace_v3_xray_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_xray_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_xray_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_xray_proto_rawDescData) + }) + return file_envoy_config_trace_v3_xray_proto_rawDescData +} + +var file_envoy_config_trace_v3_xray_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_xray_proto_goTypes = []interface{}{ + (*XRayConfig)(nil), // 0: envoy.config.trace.v3.XRayConfig + (*XRayConfig_SegmentFields)(nil), // 1: envoy.config.trace.v3.XRayConfig.SegmentFields + (*v3.SocketAddress)(nil), // 2: envoy.config.core.v3.SocketAddress + (*v3.DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*structpb.Struct)(nil), // 4: google.protobuf.Struct +} +var file_envoy_config_trace_v3_xray_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.XRayConfig.daemon_endpoint:type_name -> envoy.config.core.v3.SocketAddress + 3, // 1: envoy.config.trace.v3.XRayConfig.sampling_rule_manifest:type_name -> envoy.config.core.v3.DataSource + 1, // 2: envoy.config.trace.v3.XRayConfig.segment_fields:type_name -> envoy.config.trace.v3.XRayConfig.SegmentFields + 4, // 3: envoy.config.trace.v3.XRayConfig.SegmentFields.aws:type_name -> google.protobuf.Struct + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_xray_proto_init() } +func file_envoy_config_trace_v3_xray_proto_init() { + if File_envoy_config_trace_v3_xray_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_xray_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XRayConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_xray_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XRayConfig_SegmentFields); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_xray_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_xray_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_xray_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_xray_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_xray_proto = out.File + file_envoy_config_trace_v3_xray_proto_rawDesc = nil + file_envoy_config_trace_v3_xray_proto_goTypes = nil + file_envoy_config_trace_v3_xray_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go new file mode 100644 index 000000000..a48a838ed --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go @@ -0,0 +1,367 @@ +//go:build !disable_pgv +// 
Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on XRayConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *XRayConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on XRayConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in XRayConfigMultiError, or +// nil if none found. +func (m *XRayConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *XRayConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDaemonEndpoint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDaemonEndpoint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetSegmentName()) < 1 { + err := XRayConfigValidationError{ + field: "SegmentName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSamplingRuleManifest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSamplingRuleManifest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSegmentFields()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error 
}: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSegmentFields()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return XRayConfigMultiError(errors) + } + + return nil +} + +// XRayConfigMultiError is an error wrapping multiple validation errors +// returned by XRayConfig.ValidateAll() if the designated constraints aren't met. +type XRayConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m XRayConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m XRayConfigMultiError) AllErrors() []error { return m } + +// XRayConfigValidationError is the validation error returned by +// XRayConfig.Validate if the designated constraints aren't met. +type XRayConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e XRayConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e XRayConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e XRayConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e XRayConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e XRayConfigValidationError) ErrorName() string { return "XRayConfigValidationError" } + +// Error satisfies the builtin error interface +func (e XRayConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sXRayConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = XRayConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = XRayConfigValidationError{} + +// Validate checks the field values on XRayConfig_SegmentFields with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *XRayConfig_SegmentFields) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on XRayConfig_SegmentFields with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// XRayConfig_SegmentFieldsMultiError, or nil if none found. 
+func (m *XRayConfig_SegmentFields) ValidateAll() error { + return m.validate(true) +} + +func (m *XRayConfig_SegmentFields) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Origin + + if all { + switch v := interface{}(m.GetAws()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAws()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return XRayConfig_SegmentFieldsMultiError(errors) + } + + return nil +} + +// XRayConfig_SegmentFieldsMultiError is an error wrapping multiple validation +// errors returned by XRayConfig_SegmentFields.ValidateAll() if the designated +// constraints aren't met. +type XRayConfig_SegmentFieldsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m XRayConfig_SegmentFieldsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m XRayConfig_SegmentFieldsMultiError) AllErrors() []error { return m } + +// XRayConfig_SegmentFieldsValidationError is the validation error returned by +// XRayConfig_SegmentFields.Validate if the designated constraints aren't met. +type XRayConfig_SegmentFieldsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e XRayConfig_SegmentFieldsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e XRayConfig_SegmentFieldsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e XRayConfig_SegmentFieldsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e XRayConfig_SegmentFieldsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e XRayConfig_SegmentFieldsValidationError) ErrorName() string { + return "XRayConfig_SegmentFieldsValidationError" +} + +// Error satisfies the builtin error interface +func (e XRayConfig_SegmentFieldsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sXRayConfig_SegmentFields.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = XRayConfig_SegmentFieldsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = XRayConfig_SegmentFieldsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go new file mode 100644 index 000000000..b5bfdff5b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go @@ -0,0 +1,221 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *XRayConfig_SegmentFields) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig_SegmentFields) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig_SegmentFields) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Aws != nil { + size, err := (*structpb.Struct)(m.Aws).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.SegmentFields != nil { + size, err := m.SegmentFields.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.SamplingRuleManifest != nil { + if vtmsg, ok := interface{}(m.SamplingRuleManifest).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SamplingRuleManifest) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.SegmentName) > 0 { + i -= len(m.SegmentName) + copy(dAtA[i:], m.SegmentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SegmentName))) + i-- + dAtA[i] = 0x12 + } + if m.DaemonEndpoint != nil { + if vtmsg, ok := interface{}(m.DaemonEndpoint).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DaemonEndpoint) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig_SegmentFields) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Origin) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aws != nil { + l = (*structpb.Struct)(m.Aws).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *XRayConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DaemonEndpoint != nil { + if size, ok := interface{}(m.DaemonEndpoint).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DaemonEndpoint) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SegmentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SamplingRuleManifest != nil { + if size, ok := interface{}(m.SamplingRuleManifest).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SamplingRuleManifest) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SegmentFields != nil { + l = m.SegmentFields.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go new file mode 100644 index 000000000..bf96a43d7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go @@ -0,0 +1,360 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available Zipkin collector endpoint versions. +type ZipkinConfig_CollectorEndpointVersion int32 + +const ( + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. + ZipkinConfig_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE ZipkinConfig_CollectorEndpointVersion = 0 + // Zipkin API v2, JSON over HTTP. + ZipkinConfig_HTTP_JSON ZipkinConfig_CollectorEndpointVersion = 1 + // Zipkin API v2, protobuf over HTTP. + ZipkinConfig_HTTP_PROTO ZipkinConfig_CollectorEndpointVersion = 2 + // [#not-implemented-hide:] + ZipkinConfig_GRPC ZipkinConfig_CollectorEndpointVersion = 3 +) + +// Enum value maps for ZipkinConfig_CollectorEndpointVersion. +var ( + ZipkinConfig_CollectorEndpointVersion_name = map[int32]string{ + 0: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE", + 1: "HTTP_JSON", + 2: "HTTP_PROTO", + 3: "GRPC", + } + ZipkinConfig_CollectorEndpointVersion_value = map[string]int32{ + "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE": 0, + "HTTP_JSON": 1, + "HTTP_PROTO": 2, + "GRPC": 3, + } +) + +func (x ZipkinConfig_CollectorEndpointVersion) Enum() *ZipkinConfig_CollectorEndpointVersion { + p := new(ZipkinConfig_CollectorEndpointVersion) + *p = x + return p +} + +func (x ZipkinConfig_CollectorEndpointVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ZipkinConfig_CollectorEndpointVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_zipkin_proto_enumTypes[0].Descriptor() +} + +func (ZipkinConfig_CollectorEndpointVersion) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_zipkin_proto_enumTypes[0] +} + +func (x ZipkinConfig_CollectorEndpointVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ZipkinConfig_CollectorEndpointVersion.Descriptor instead. +func (ZipkinConfig_CollectorEndpointVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the Zipkin tracer. 
+// [#extension: envoy.tracers.zipkin] +// [#next-free-field: 8] +type ZipkinConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster manager cluster that hosts the Zipkin collectors. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation. + CollectorEndpoint string `protobuf:"bytes,2,opt,name=collector_endpoint,json=collectorEndpoint,proto3" json:"collector_endpoint,omitempty"` + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + TraceId_128Bit bool `protobuf:"varint,3,opt,name=trace_id_128bit,json=traceId128bit,proto3" json:"trace_id_128bit,omitempty"` + // Determines whether client and server spans will share the same span context. + // The default value is true. + SharedSpanContext *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=shared_span_context,json=sharedSpanContext,proto3" json:"shared_span_context,omitempty"` + // Determines the selected collector endpoint version. + CollectorEndpointVersion ZipkinConfig_CollectorEndpointVersion `protobuf:"varint,5,opt,name=collector_endpoint_version,json=collectorEndpointVersion,proto3,enum=envoy.config.trace.v3.ZipkinConfig_CollectorEndpointVersion" json:"collector_endpoint_version,omitempty"` + // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors + // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. + CollectorHostname string `protobuf:"bytes,6,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // If this is set to true, then Envoy will be treated as an independent hop in trace chain. A complete span pair will be created for a single + // request. Server span will be created for the downstream request and client span will be created for the related upstream request. + // This should be set to true in the following cases: + // + // * The Envoy Proxy is used as gateway or ingress. + // * The Envoy Proxy is used as sidecar but inbound traffic capturing or outbound traffic capturing is disabled. + // * Any case that the :ref:`start_child_span of router ` is set to true. + // + // .. attention:: + // + // If this is set to true, then the + // :ref:`start_child_span of router ` + // SHOULD be set to true also to ensure the correctness of trace chain. + // + // Both this field and ``start_child_span`` are deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. 
+ SplitSpansForRequest bool `protobuf:"varint,7,opt,name=split_spans_for_request,json=splitSpansForRequest,proto3" json:"split_spans_for_request,omitempty"` +} + +func (x *ZipkinConfig) Reset() { + *x = ZipkinConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_zipkin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ZipkinConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ZipkinConfig) ProtoMessage() {} + +func (x *ZipkinConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_zipkin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ZipkinConfig.ProtoReflect.Descriptor instead. +func (*ZipkinConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP(), []int{0} +} + +func (x *ZipkinConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +func (x *ZipkinConfig) GetCollectorEndpoint() string { + if x != nil { + return x.CollectorEndpoint + } + return "" +} + +func (x *ZipkinConfig) GetTraceId_128Bit() bool { + if x != nil { + return x.TraceId_128Bit + } + return false +} + +func (x *ZipkinConfig) GetSharedSpanContext() *wrapperspb.BoolValue { + if x != nil { + return x.SharedSpanContext + } + return nil +} + +func (x *ZipkinConfig) GetCollectorEndpointVersion() ZipkinConfig_CollectorEndpointVersion { + if x != nil { + return x.CollectorEndpointVersion + } + return ZipkinConfig_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE +} + +func (x *ZipkinConfig) GetCollectorHostname() string { + if x != nil { + return x.CollectorHostname + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. 
+func (x *ZipkinConfig) GetSplitSpansForRequest() bool { + if x != nil { + return x.SplitSpansForRequest + } + return false +} + +var File_envoy_config_trace_v3_zipkin_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x05, 0x0a, 0x0c, + 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x36, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x31, 0x32, 0x38, 0x62, 0x69, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x31, 0x32, 0x38, 0x62, + 0x69, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x7a, + 0x0a, 0x1a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x18, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x17, 0x73, 0x70, 0x6c, + 0x69, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x53, 0x70, + 0x61, 0x6e, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, + 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x25, 0x44, 0x45, 0x50, + 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, + 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, + 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0xb1, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x29, 0x12, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x76, 0x34, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x0b, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_zipkin_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_zipkin_proto_rawDescData = file_envoy_config_trace_v3_zipkin_proto_rawDesc +) + 
+func file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_zipkin_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_zipkin_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_zipkin_proto_rawDescData) + }) + return file_envoy_config_trace_v3_zipkin_proto_rawDescData +} + +var file_envoy_config_trace_v3_zipkin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_zipkin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_zipkin_proto_goTypes = []interface{}{ + (ZipkinConfig_CollectorEndpointVersion)(0), // 0: envoy.config.trace.v3.ZipkinConfig.CollectorEndpointVersion + (*ZipkinConfig)(nil), // 1: envoy.config.trace.v3.ZipkinConfig + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue +} +var file_envoy_config_trace_v3_zipkin_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.ZipkinConfig.shared_span_context:type_name -> google.protobuf.BoolValue + 0, // 1: envoy.config.trace.v3.ZipkinConfig.collector_endpoint_version:type_name -> envoy.config.trace.v3.ZipkinConfig.CollectorEndpointVersion + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_zipkin_proto_init() } +func file_envoy_config_trace_v3_zipkin_proto_init() { + if File_envoy_config_trace_v3_zipkin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_zipkin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ZipkinConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_zipkin_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_zipkin_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_zipkin_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_zipkin_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_zipkin_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_zipkin_proto = out.File + file_envoy_config_trace_v3_zipkin_proto_rawDesc = nil + file_envoy_config_trace_v3_zipkin_proto_goTypes = nil + file_envoy_config_trace_v3_zipkin_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go new file mode 100644 index 000000000..d2db7385a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go @@ -0,0 +1,195 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ZipkinConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ZipkinConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ZipkinConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ZipkinConfigMultiError, or +// nil if none found. +func (m *ZipkinConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ZipkinConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := ZipkinConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetCollectorEndpoint()) < 1 { + err := ZipkinConfigValidationError{ + field: "CollectorEndpoint", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for TraceId_128Bit + + if all { + switch v := interface{}(m.GetSharedSpanContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSharedSpanContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CollectorEndpointVersion + + // no validation rules for CollectorHostname + + // no validation rules for SplitSpansForRequest + + if len(errors) > 0 { + return ZipkinConfigMultiError(errors) + } + + return nil +} + +// ZipkinConfigMultiError is an error wrapping multiple validation errors +// returned by ZipkinConfig.ValidateAll() if the designated constraints aren't met. +type ZipkinConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ZipkinConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ZipkinConfigMultiError) AllErrors() []error { return m } + +// ZipkinConfigValidationError is the validation error returned by +// ZipkinConfig.Validate if the designated constraints aren't met. +type ZipkinConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ZipkinConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ZipkinConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ZipkinConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ZipkinConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ZipkinConfigValidationError) ErrorName() string { return "ZipkinConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ZipkinConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sZipkinConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ZipkinConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ZipkinConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go new file mode 100644 index 000000000..2dc450502 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go @@ -0,0 +1,144 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ZipkinConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ZipkinConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ZipkinConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SplitSpansForRequest { + i-- + if m.SplitSpansForRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x32 + } + if m.CollectorEndpointVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CollectorEndpointVersion)) + i-- + dAtA[i] = 0x28 + } + if m.SharedSpanContext != nil { + size, err := (*wrapperspb.BoolValue)(m.SharedSpanContext).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TraceId_128Bit { + i-- + if m.TraceId_128Bit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CollectorEndpoint) > 0 { + i -= len(m.CollectorEndpoint) + copy(dAtA[i:], m.CollectorEndpoint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorEndpoint))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ZipkinConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorEndpoint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceId_128Bit { + n += 2 + } + if m.SharedSpanContext != nil { + l = (*wrapperspb.BoolValue)(m.SharedSpanContext).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CollectorEndpointVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CollectorEndpointVersion)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitSpansForRequest { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go new file mode 100644 index 000000000..eb9a42f3d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go @@ -0,0 +1,2599 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AccessLogType int32 + +const ( + AccessLogType_NotSet AccessLogType = 0 + AccessLogType_TcpUpstreamConnected AccessLogType = 1 + AccessLogType_TcpPeriodic AccessLogType = 2 + AccessLogType_TcpConnectionEnd AccessLogType = 3 + AccessLogType_DownstreamStart AccessLogType = 4 + AccessLogType_DownstreamPeriodic AccessLogType = 5 + AccessLogType_DownstreamEnd AccessLogType = 6 + AccessLogType_UpstreamPoolReady AccessLogType = 7 + AccessLogType_UpstreamPeriodic AccessLogType = 8 + AccessLogType_UpstreamEnd AccessLogType = 9 + AccessLogType_DownstreamTunnelSuccessfullyEstablished AccessLogType = 10 + AccessLogType_UdpTunnelUpstreamConnected AccessLogType = 11 + AccessLogType_UdpPeriodic AccessLogType = 12 + AccessLogType_UdpSessionEnd AccessLogType = 13 +) + +// Enum value maps for AccessLogType. +var ( + AccessLogType_name = map[int32]string{ + 0: "NotSet", + 1: "TcpUpstreamConnected", + 2: "TcpPeriodic", + 3: "TcpConnectionEnd", + 4: "DownstreamStart", + 5: "DownstreamPeriodic", + 6: "DownstreamEnd", + 7: "UpstreamPoolReady", + 8: "UpstreamPeriodic", + 9: "UpstreamEnd", + 10: "DownstreamTunnelSuccessfullyEstablished", + 11: "UdpTunnelUpstreamConnected", + 12: "UdpPeriodic", + 13: "UdpSessionEnd", + } + AccessLogType_value = map[string]int32{ + "NotSet": 0, + "TcpUpstreamConnected": 1, + "TcpPeriodic": 2, + "TcpConnectionEnd": 3, + "DownstreamStart": 4, + "DownstreamPeriodic": 5, + "DownstreamEnd": 6, + "UpstreamPoolReady": 7, + "UpstreamPeriodic": 8, + "UpstreamEnd": 9, + "DownstreamTunnelSuccessfullyEstablished": 10, + "UdpTunnelUpstreamConnected": 11, + "UdpPeriodic": 12, + "UdpSessionEnd": 13, + } +) + +func (x AccessLogType) Enum() *AccessLogType { + p := new(AccessLogType) + *p = x + return p +} + +func (x AccessLogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AccessLogType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor() +} + +func (AccessLogType) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0] +} + +func (x AccessLogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AccessLogType.Descriptor instead. 
+func (AccessLogType) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +// HTTP version +type HTTPAccessLogEntry_HTTPVersion int32 + +const ( + HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED HTTPAccessLogEntry_HTTPVersion = 0 + HTTPAccessLogEntry_HTTP10 HTTPAccessLogEntry_HTTPVersion = 1 + HTTPAccessLogEntry_HTTP11 HTTPAccessLogEntry_HTTPVersion = 2 + HTTPAccessLogEntry_HTTP2 HTTPAccessLogEntry_HTTPVersion = 3 + HTTPAccessLogEntry_HTTP3 HTTPAccessLogEntry_HTTPVersion = 4 +) + +// Enum value maps for HTTPAccessLogEntry_HTTPVersion. +var ( + HTTPAccessLogEntry_HTTPVersion_name = map[int32]string{ + 0: "PROTOCOL_UNSPECIFIED", + 1: "HTTP10", + 2: "HTTP11", + 3: "HTTP2", + 4: "HTTP3", + } + HTTPAccessLogEntry_HTTPVersion_value = map[string]int32{ + "PROTOCOL_UNSPECIFIED": 0, + "HTTP10": 1, + "HTTP11": 2, + "HTTP2": 3, + "HTTP3": 4, + } +) + +func (x HTTPAccessLogEntry_HTTPVersion) Enum() *HTTPAccessLogEntry_HTTPVersion { + p := new(HTTPAccessLogEntry_HTTPVersion) + *p = x + return p +} + +func (x HTTPAccessLogEntry_HTTPVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HTTPAccessLogEntry_HTTPVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor() +} + +func (HTTPAccessLogEntry_HTTPVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1] +} + +func (x HTTPAccessLogEntry_HTTPVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HTTPAccessLogEntry_HTTPVersion.Descriptor instead. +func (HTTPAccessLogEntry_HTTPVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1, 0} +} + +// Reasons why the request was unauthorized +type ResponseFlags_Unauthorized_Reason int32 + +const ( + ResponseFlags_Unauthorized_REASON_UNSPECIFIED ResponseFlags_Unauthorized_Reason = 0 + // The request was denied by the external authorization service. + ResponseFlags_Unauthorized_EXTERNAL_SERVICE ResponseFlags_Unauthorized_Reason = 1 +) + +// Enum value maps for ResponseFlags_Unauthorized_Reason. +var ( + ResponseFlags_Unauthorized_Reason_name = map[int32]string{ + 0: "REASON_UNSPECIFIED", + 1: "EXTERNAL_SERVICE", + } + ResponseFlags_Unauthorized_Reason_value = map[string]int32{ + "REASON_UNSPECIFIED": 0, + "EXTERNAL_SERVICE": 1, + } +) + +func (x ResponseFlags_Unauthorized_Reason) Enum() *ResponseFlags_Unauthorized_Reason { + p := new(ResponseFlags_Unauthorized_Reason) + *p = x + return p +} + +func (x ResponseFlags_Unauthorized_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResponseFlags_Unauthorized_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2].Descriptor() +} + +func (ResponseFlags_Unauthorized_Reason) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2] +} + +func (x ResponseFlags_Unauthorized_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized_Reason.Descriptor instead. 
+func (ResponseFlags_Unauthorized_Reason) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0, 0} +} + +type TLSProperties_TLSVersion int32 + +const ( + TLSProperties_VERSION_UNSPECIFIED TLSProperties_TLSVersion = 0 + TLSProperties_TLSv1 TLSProperties_TLSVersion = 1 + TLSProperties_TLSv1_1 TLSProperties_TLSVersion = 2 + TLSProperties_TLSv1_2 TLSProperties_TLSVersion = 3 + TLSProperties_TLSv1_3 TLSProperties_TLSVersion = 4 +) + +// Enum value maps for TLSProperties_TLSVersion. +var ( + TLSProperties_TLSVersion_name = map[int32]string{ + 0: "VERSION_UNSPECIFIED", + 1: "TLSv1", + 2: "TLSv1_1", + 3: "TLSv1_2", + 4: "TLSv1_3", + } + TLSProperties_TLSVersion_value = map[string]int32{ + "VERSION_UNSPECIFIED": 0, + "TLSv1": 1, + "TLSv1_1": 2, + "TLSv1_2": 3, + "TLSv1_3": 4, + } +) + +func (x TLSProperties_TLSVersion) Enum() *TLSProperties_TLSVersion { + p := new(TLSProperties_TLSVersion) + *p = x + return p +} + +func (x TLSProperties_TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSProperties_TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3].Descriptor() +} + +func (TLSProperties_TLSVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3] +} + +func (x TLSProperties_TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSProperties_TLSVersion.Descriptor instead. +func (TLSProperties_TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +type TCPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. + CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + // Properties of the TCP connection. + ConnectionProperties *ConnectionProperties `protobuf:"bytes,2,opt,name=connection_properties,json=connectionProperties,proto3" json:"connection_properties,omitempty"` +} + +func (x *TCPAccessLogEntry) Reset() { + *x = TCPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCPAccessLogEntry) ProtoMessage() {} + +func (x *TCPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCPAccessLogEntry.ProtoReflect.Descriptor instead. 
+func (*TCPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *TCPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *TCPAccessLogEntry) GetConnectionProperties() *ConnectionProperties { + if x != nil { + return x.ConnectionProperties + } + return nil +} + +type HTTPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. + CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + ProtocolVersion HTTPAccessLogEntry_HTTPVersion `protobuf:"varint,2,opt,name=protocol_version,json=protocolVersion,proto3,enum=envoy.data.accesslog.v3.HTTPAccessLogEntry_HTTPVersion" json:"protocol_version,omitempty"` + // Description of the incoming HTTP request. + Request *HTTPRequestProperties `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + // Description of the outgoing HTTP response. + Response *HTTPResponseProperties `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *HTTPAccessLogEntry) Reset() { + *x = HTTPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPAccessLogEntry) ProtoMessage() {} + +func (x *HTTPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPAccessLogEntry.ProtoReflect.Descriptor instead. +func (*HTTPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *HTTPAccessLogEntry) GetProtocolVersion() HTTPAccessLogEntry_HTTPVersion { + if x != nil { + return x.ProtocolVersion + } + return HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED +} + +func (x *HTTPAccessLogEntry) GetRequest() *HTTPRequestProperties { + if x != nil { + return x.Request + } + return nil +} + +func (x *HTTPAccessLogEntry) GetResponse() *HTTPResponseProperties { + if x != nil { + return x.Response + } + return nil +} + +// Defines fields for a connection +type ConnectionProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of bytes received from downstream. + ReceivedBytes uint64 `protobuf:"varint,1,opt,name=received_bytes,json=receivedBytes,proto3" json:"received_bytes,omitempty"` + // Number of bytes sent to downstream. 
+ SentBytes uint64 `protobuf:"varint,2,opt,name=sent_bytes,json=sentBytes,proto3" json:"sent_bytes,omitempty"` +} + +func (x *ConnectionProperties) Reset() { + *x = ConnectionProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionProperties) ProtoMessage() {} + +func (x *ConnectionProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionProperties.ProtoReflect.Descriptor instead. +func (*ConnectionProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *ConnectionProperties) GetReceivedBytes() uint64 { + if x != nil { + return x.ReceivedBytes + } + return 0 +} + +func (x *ConnectionProperties) GetSentBytes() uint64 { + if x != nil { + return x.SentBytes + } + return 0 +} + +// Defines fields that are shared by all Envoy access logs. +// [#next-free-field: 34] +type AccessLogCommon struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] + // This field indicates the rate at which this log entry was sampled. + // Valid range is (0.0, 1.0]. + SampleRate float64 `protobuf:"fixed64,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"` + // This field is the remote/origin address on which the request from the user was received. + // Note: This may not be the physical peer. E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. + DownstreamRemoteAddress *v3.Address `protobuf:"bytes,2,opt,name=downstream_remote_address,json=downstreamRemoteAddress,proto3" json:"downstream_remote_address,omitempty"` + // This field is the local/destination address on which the request from the user was received. + DownstreamLocalAddress *v3.Address `protobuf:"bytes,3,opt,name=downstream_local_address,json=downstreamLocalAddress,proto3" json:"downstream_local_address,omitempty"` + // If the connection is secure,S this field will contain TLS properties. + TlsProperties *TLSProperties `protobuf:"bytes,4,opt,name=tls_properties,json=tlsProperties,proto3" json:"tls_properties,omitempty"` + // The time that Envoy started servicing this request. This is effectively the time that the first + // downstream byte is received. + StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Interval between the first downstream byte received and the last + // downstream byte received (i.e. time it takes to receive a request). + TimeToLastRxByte *durationpb.Duration `protobuf:"bytes,6,opt,name=time_to_last_rx_byte,json=timeToLastRxByte,proto3" json:"time_to_last_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream byte sent. There may + // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. 
+ // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about + // not accounting for kernel socket buffer time, etc. + TimeToFirstUpstreamTxByte *durationpb.Duration `protobuf:"bytes,7,opt,name=time_to_first_upstream_tx_byte,json=timeToFirstUpstreamTxByte,proto3" json:"time_to_first_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream byte sent. There may + // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. + // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about + // not accounting for kernel socket buffer time, etc. + TimeToLastUpstreamTxByte *durationpb.Duration `protobuf:"bytes,8,opt,name=time_to_last_upstream_tx_byte,json=timeToLastUpstreamTxByte,proto3" json:"time_to_last_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream + // byte received (i.e. time it takes to start receiving a response). + TimeToFirstUpstreamRxByte *durationpb.Duration `protobuf:"bytes,9,opt,name=time_to_first_upstream_rx_byte,json=timeToFirstUpstreamRxByte,proto3" json:"time_to_first_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream + // byte received (i.e. time it takes to receive a complete response). + TimeToLastUpstreamRxByte *durationpb.Duration `protobuf:"bytes,10,opt,name=time_to_last_upstream_rx_byte,json=timeToLastUpstreamRxByte,proto3" json:"time_to_last_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first downstream byte sent. + // There may be a considerable delta between the “time_to_first_upstream_rx_byte“ and this field + // due to filters. Additionally, the same caveats apply as documented in + // “time_to_last_downstream_tx_byte“ about not accounting for kernel socket buffer time, etc. + TimeToFirstDownstreamTxByte *durationpb.Duration `protobuf:"bytes,11,opt,name=time_to_first_downstream_tx_byte,json=timeToFirstDownstreamTxByte,proto3" json:"time_to_first_downstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last downstream byte sent. + // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + // between “time_to_last_upstream_rx_byte“ and this field. Note also that this is an approximate + // time. In the current implementation it does not include kernel socket buffer time. In the + // current implementation it also does not include send window buffering inside the HTTP/2 codec. + // In the future it is likely that work will be done to make this duration more accurate. + TimeToLastDownstreamTxByte *durationpb.Duration `protobuf:"bytes,12,opt,name=time_to_last_downstream_tx_byte,json=timeToLastDownstreamTxByte,proto3" json:"time_to_last_downstream_tx_byte,omitempty"` + // The upstream remote/destination address that handles this exchange. This does not include + // retries. + UpstreamRemoteAddress *v3.Address `protobuf:"bytes,13,opt,name=upstream_remote_address,json=upstreamRemoteAddress,proto3" json:"upstream_remote_address,omitempty"` + // The upstream local/origin address that handles this exchange. This does not include retries. + UpstreamLocalAddress *v3.Address `protobuf:"bytes,14,opt,name=upstream_local_address,json=upstreamLocalAddress,proto3" json:"upstream_local_address,omitempty"` + // The upstream cluster that “upstream_remote_address“ belongs to. 
+ UpstreamCluster string `protobuf:"bytes,15,opt,name=upstream_cluster,json=upstreamCluster,proto3" json:"upstream_cluster,omitempty"` + // Flags indicating occurrences during request/response processing. + ResponseFlags *ResponseFlags `protobuf:"bytes,16,opt,name=response_flags,json=responseFlags,proto3" json:"response_flags,omitempty"` + // All metadata encountered during request processing, including endpoint + // selection. + // + // This can be used to associate IDs attached to the various configurations + // used to process this request with the access log entry. For example, a + // route created from a higher level forwarding rule with some ID can place + // that ID in this field and cross reference later. It can also be used to + // determine if a canary endpoint was used or not. + Metadata *v3.Metadata `protobuf:"bytes,17,opt,name=metadata,proto3" json:"metadata,omitempty"` + // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured + // upstream transport socket. Common TLS failures are in + // :ref:`TLS trouble shooting `. + UpstreamTransportFailureReason string `protobuf:"bytes,18,opt,name=upstream_transport_failure_reason,json=upstreamTransportFailureReason,proto3" json:"upstream_transport_failure_reason,omitempty"` + // The name of the route + RouteName string `protobuf:"bytes,19,opt,name=route_name,json=routeName,proto3" json:"route_name,omitempty"` + // This field is the downstream direct remote address on which the request from the user was + // received. Note: This is always the physical peer, even if the remote address is inferred from + // for example the x-forwarder-for header, proxy protocol, etc. + DownstreamDirectRemoteAddress *v3.Address `protobuf:"bytes,20,opt,name=downstream_direct_remote_address,json=downstreamDirectRemoteAddress,proto3" json:"downstream_direct_remote_address,omitempty"` + // Map of filter state in stream info that have been configured to be logged. If the filter + // state serialized to any message other than “google.protobuf.Any“ it will be packed into + // “google.protobuf.Any“. + FilterStateObjects map[string]*anypb.Any `protobuf:"bytes,21,rep,name=filter_state_objects,json=filterStateObjects,proto3" json:"filter_state_objects,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of custom tags, which annotate logs with additional information. + // To configure this value, users should configure + // :ref:`custom_tags `. + CustomTags map[string]string `protobuf:"bytes,22,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // For HTTP: Total duration in milliseconds of the request from the start time to the last byte out. + // For TCP: Total duration in milliseconds of the downstream connection. + // This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) + // and may be longer than “time_to_last_downstream_tx_byte“. + Duration *durationpb.Duration `protobuf:"bytes,23,opt,name=duration,proto3" json:"duration,omitempty"` + // For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. + // For TCP: Number of times the connection request is attempted upstream. 
Note that the field is omitted when the connect request was never attempted upstream. + UpstreamRequestAttemptCount uint32 `protobuf:"varint,24,opt,name=upstream_request_attempt_count,json=upstreamRequestAttemptCount,proto3" json:"upstream_request_attempt_count,omitempty"` + // Connection termination details may provide additional information about why the connection was terminated by Envoy for L4 reasons. + ConnectionTerminationDetails string `protobuf:"bytes,25,opt,name=connection_termination_details,json=connectionTerminationDetails,proto3" json:"connection_termination_details,omitempty"` + // Optional unique id of stream (TCP connection, long-live HTTP2 stream, HTTP request) for logging and tracing. + // This could be any format string that could be used to identify one stream. + StreamId string `protobuf:"bytes,26,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + // If this log entry is final log entry that flushed after the stream completed or + // intermediate log entry that flushed periodically during the stream. + // There may be multiple intermediate log entries and only one final log entry for each + // long-live stream (TCP connection, long-live HTTP2 stream). + // And if it is necessary, unique ID or identifier can be added to the log entry + // :ref:`stream_id ` to + // correlate all these intermediate log entries and final log entry. + // + // .. attention:: + // + // This field is deprecated in favor of ``access_log_type`` for better indication of the + // type of the access log record. + // + // Deprecated: Marked as deprecated in envoy/data/accesslog/v3/accesslog.proto. + IntermediateLogEntry bool `protobuf:"varint,27,opt,name=intermediate_log_entry,json=intermediateLogEntry,proto3" json:"intermediate_log_entry,omitempty"` + // If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured downstream + // transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + DownstreamTransportFailureReason string `protobuf:"bytes,28,opt,name=downstream_transport_failure_reason,json=downstreamTransportFailureReason,proto3" json:"downstream_transport_failure_reason,omitempty"` + // For HTTP: Total number of bytes sent to the downstream by the http stream. + // For TCP: Total number of bytes sent to the downstream by the tcp proxy. + DownstreamWireBytesSent uint64 `protobuf:"varint,29,opt,name=downstream_wire_bytes_sent,json=downstreamWireBytesSent,proto3" json:"downstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + // For TCP: Total number of bytes received from the downstream by the tcp proxy. + DownstreamWireBytesReceived uint64 `protobuf:"varint,30,opt,name=downstream_wire_bytes_received,json=downstreamWireBytesReceived,proto3" json:"downstream_wire_bytes_received,omitempty"` + // For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. 
+ UpstreamWireBytesSent uint64 `protobuf:"varint,31,opt,name=upstream_wire_bytes_sent,json=upstreamWireBytesSent,proto3" json:"upstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the upstream by the http stream. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. + UpstreamWireBytesReceived uint64 `protobuf:"varint,32,opt,name=upstream_wire_bytes_received,json=upstreamWireBytesReceived,proto3" json:"upstream_wire_bytes_received,omitempty"` + // The type of the access log, which indicates when the log was recorded. + // See :ref:`ACCESS_LOG_TYPE ` for the available values. + // In case the access log was recorded by a flow which does not correspond to one of the supported + // values, then the default value will be “NotSet“. + // For more information about how access log behaves and when it is being recorded, + // please refer to :ref:`access logging `. + AccessLogType AccessLogType `protobuf:"varint,33,opt,name=access_log_type,json=accessLogType,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"access_log_type,omitempty"` +} + +func (x *AccessLogCommon) Reset() { + *x = AccessLogCommon{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLogCommon) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLogCommon) ProtoMessage() {} + +func (x *AccessLogCommon) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLogCommon.ProtoReflect.Descriptor instead. 
+func (*AccessLogCommon) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *AccessLogCommon) GetSampleRate() float64 { + if x != nil { + return x.SampleRate + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetDownstreamLocalAddress() *v3.Address { + if x != nil { + return x.DownstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetTlsProperties() *TLSProperties { + if x != nil { + return x.TlsProperties + } + return nil +} + +func (x *AccessLogCommon) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstDownstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastDownstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRemoteAddress() *v3.Address { + if x != nil { + return x.UpstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamLocalAddress() *v3.Address { + if x != nil { + return x.UpstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamCluster() string { + if x != nil { + return x.UpstreamCluster + } + return "" +} + +func (x *AccessLogCommon) GetResponseFlags() *ResponseFlags { + if x != nil { + return x.ResponseFlags + } + return nil +} + +func (x *AccessLogCommon) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamTransportFailureReason() string { + if x != nil { + return x.UpstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetRouteName() string { + if x != nil { + return x.RouteName + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamDirectRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamDirectRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetFilterStateObjects() map[string]*anypb.Any { + if x != nil { + return x.FilterStateObjects + } + return nil +} + +func (x *AccessLogCommon) GetCustomTags() map[string]string { + if x != nil { + return x.CustomTags + } + return nil +} + +func (x *AccessLogCommon) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRequestAttemptCount() uint32 { + if x != nil { + return x.UpstreamRequestAttemptCount + } + return 0 +} + +func (x *AccessLogCommon) GetConnectionTerminationDetails() string { + if x != nil { + return 
x.ConnectionTerminationDetails + } + return "" +} + +func (x *AccessLogCommon) GetStreamId() string { + if x != nil { + return x.StreamId + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/data/accesslog/v3/accesslog.proto. +func (x *AccessLogCommon) GetIntermediateLogEntry() bool { + if x != nil { + return x.IntermediateLogEntry + } + return false +} + +func (x *AccessLogCommon) GetDownstreamTransportFailureReason() string { + if x != nil { + return x.DownstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamWireBytesSent() uint64 { + if x != nil { + return x.DownstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamWireBytesReceived() uint64 { + if x != nil { + return x.DownstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesSent() uint64 { + if x != nil { + return x.UpstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesReceived() uint64 { + if x != nil { + return x.UpstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetAccessLogType() AccessLogType { + if x != nil { + return x.AccessLogType + } + return AccessLogType_NotSet +} + +// Flags indicating occurrences during request/response processing. +// [#next-free-field: 29] +type ResponseFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates local server healthcheck failed. + FailedLocalHealthcheck bool `protobuf:"varint,1,opt,name=failed_local_healthcheck,json=failedLocalHealthcheck,proto3" json:"failed_local_healthcheck,omitempty"` + // Indicates there was no healthy upstream. + NoHealthyUpstream bool `protobuf:"varint,2,opt,name=no_healthy_upstream,json=noHealthyUpstream,proto3" json:"no_healthy_upstream,omitempty"` + // Indicates an there was an upstream request timeout. + UpstreamRequestTimeout bool `protobuf:"varint,3,opt,name=upstream_request_timeout,json=upstreamRequestTimeout,proto3" json:"upstream_request_timeout,omitempty"` + // Indicates local codec level reset was sent on the stream. + LocalReset bool `protobuf:"varint,4,opt,name=local_reset,json=localReset,proto3" json:"local_reset,omitempty"` + // Indicates remote codec level reset was received on the stream. + UpstreamRemoteReset bool `protobuf:"varint,5,opt,name=upstream_remote_reset,json=upstreamRemoteReset,proto3" json:"upstream_remote_reset,omitempty"` + // Indicates there was a local reset by a connection pool due to an initial connection failure. + UpstreamConnectionFailure bool `protobuf:"varint,6,opt,name=upstream_connection_failure,json=upstreamConnectionFailure,proto3" json:"upstream_connection_failure,omitempty"` + // Indicates the stream was reset due to an upstream connection termination. + UpstreamConnectionTermination bool `protobuf:"varint,7,opt,name=upstream_connection_termination,json=upstreamConnectionTermination,proto3" json:"upstream_connection_termination,omitempty"` + // Indicates the stream was reset because of a resource overflow. + UpstreamOverflow bool `protobuf:"varint,8,opt,name=upstream_overflow,json=upstreamOverflow,proto3" json:"upstream_overflow,omitempty"` + // Indicates no route was found for the request. + NoRouteFound bool `protobuf:"varint,9,opt,name=no_route_found,json=noRouteFound,proto3" json:"no_route_found,omitempty"` + // Indicates that the request was delayed before proxying. 
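
For reference, a minimal sketch of how the nil-safe AccessLogCommon getters above might be consumed by a log sink. The import path is an assumption (the upstream go-control-plane location; adjust it to wherever this file is vendored), and the cluster/route values are made-up samples:

package main

import (
	"fmt"

	accesslogv3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
)

// logLine renders the AccessLogCommon fields a typical sink cares about.
// All generated getters tolerate a nil receiver, and Duration.AsDuration()
// treats a nil Duration as zero, so sparsely populated entries print zero
// values rather than panicking.
func logLine(c *accesslogv3.AccessLogCommon) string {
	return fmt.Sprintf("cluster=%s route=%s duration=%s down_rx=%dB down_tx=%dB attempts=%d",
		c.GetUpstreamCluster(),
		c.GetRouteName(),
		c.GetDuration().AsDuration(),
		c.GetDownstreamWireBytesReceived(),
		c.GetDownstreamWireBytesSent(),
		c.GetUpstreamRequestAttemptCount(),
	)
}

func main() {
	fmt.Println(logLine(&accesslogv3.AccessLogCommon{UpstreamCluster: "backend", RouteName: "default"}))
}
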
+ DelayInjected bool `protobuf:"varint,10,opt,name=delay_injected,json=delayInjected,proto3" json:"delay_injected,omitempty"` + // Indicates that the request was aborted with an injected error code. + FaultInjected bool `protobuf:"varint,11,opt,name=fault_injected,json=faultInjected,proto3" json:"fault_injected,omitempty"` + // Indicates that the request was rate-limited locally. + RateLimited bool `protobuf:"varint,12,opt,name=rate_limited,json=rateLimited,proto3" json:"rate_limited,omitempty"` + // Indicates if the request was deemed unauthorized and the reason for it. + UnauthorizedDetails *ResponseFlags_Unauthorized `protobuf:"bytes,13,opt,name=unauthorized_details,json=unauthorizedDetails,proto3" json:"unauthorized_details,omitempty"` + // Indicates that the request was rejected because there was an error in rate limit service. + RateLimitServiceError bool `protobuf:"varint,14,opt,name=rate_limit_service_error,json=rateLimitServiceError,proto3" json:"rate_limit_service_error,omitempty"` + // Indicates the stream was reset due to a downstream connection termination. + DownstreamConnectionTermination bool `protobuf:"varint,15,opt,name=downstream_connection_termination,json=downstreamConnectionTermination,proto3" json:"downstream_connection_termination,omitempty"` + // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + UpstreamRetryLimitExceeded bool `protobuf:"varint,16,opt,name=upstream_retry_limit_exceeded,json=upstreamRetryLimitExceeded,proto3" json:"upstream_retry_limit_exceeded,omitempty"` + // Indicates that the stream idle timeout was hit, resulting in a downstream 408. + StreamIdleTimeout bool `protobuf:"varint,17,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + // Indicates that the request was rejected because an envoy request header failed strict + // validation. + InvalidEnvoyRequestHeaders bool `protobuf:"varint,18,opt,name=invalid_envoy_request_headers,json=invalidEnvoyRequestHeaders,proto3" json:"invalid_envoy_request_headers,omitempty"` + // Indicates there was an HTTP protocol error on the downstream request. + DownstreamProtocolError bool `protobuf:"varint,19,opt,name=downstream_protocol_error,json=downstreamProtocolError,proto3" json:"downstream_protocol_error,omitempty"` + // Indicates there was a max stream duration reached on the upstream request. + UpstreamMaxStreamDurationReached bool `protobuf:"varint,20,opt,name=upstream_max_stream_duration_reached,json=upstreamMaxStreamDurationReached,proto3" json:"upstream_max_stream_duration_reached,omitempty"` + // Indicates the response was served from a cache filter. + ResponseFromCacheFilter bool `protobuf:"varint,21,opt,name=response_from_cache_filter,json=responseFromCacheFilter,proto3" json:"response_from_cache_filter,omitempty"` + // Indicates that a filter configuration is not available. + NoFilterConfigFound bool `protobuf:"varint,22,opt,name=no_filter_config_found,json=noFilterConfigFound,proto3" json:"no_filter_config_found,omitempty"` + // Indicates that request or connection exceeded the downstream connection duration. + DurationTimeout bool `protobuf:"varint,23,opt,name=duration_timeout,json=durationTimeout,proto3" json:"duration_timeout,omitempty"` + // Indicates there was an HTTP protocol error in the upstream response. 
+ UpstreamProtocolError bool `protobuf:"varint,24,opt,name=upstream_protocol_error,json=upstreamProtocolError,proto3" json:"upstream_protocol_error,omitempty"` + // Indicates no cluster was found for the request. + NoClusterFound bool `protobuf:"varint,25,opt,name=no_cluster_found,json=noClusterFound,proto3" json:"no_cluster_found,omitempty"` + // Indicates overload manager terminated the request. + OverloadManager bool `protobuf:"varint,26,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` + // Indicates a DNS resolution failed. + DnsResolutionFailure bool `protobuf:"varint,27,opt,name=dns_resolution_failure,json=dnsResolutionFailure,proto3" json:"dns_resolution_failure,omitempty"` + // Indicates a downstream remote codec level reset was received on the stream + DownstreamRemoteReset bool `protobuf:"varint,28,opt,name=downstream_remote_reset,json=downstreamRemoteReset,proto3" json:"downstream_remote_reset,omitempty"` +} + +func (x *ResponseFlags) Reset() { + *x = ResponseFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlags) ProtoMessage() {} + +func (x *ResponseFlags) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags.ProtoReflect.Descriptor instead. +func (*ResponseFlags) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *ResponseFlags) GetFailedLocalHealthcheck() bool { + if x != nil { + return x.FailedLocalHealthcheck + } + return false +} + +func (x *ResponseFlags) GetNoHealthyUpstream() bool { + if x != nil { + return x.NoHealthyUpstream + } + return false +} + +func (x *ResponseFlags) GetUpstreamRequestTimeout() bool { + if x != nil { + return x.UpstreamRequestTimeout + } + return false +} + +func (x *ResponseFlags) GetLocalReset() bool { + if x != nil { + return x.LocalReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamRemoteReset() bool { + if x != nil { + return x.UpstreamRemoteReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionFailure() bool { + if x != nil { + return x.UpstreamConnectionFailure + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionTermination() bool { + if x != nil { + return x.UpstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamOverflow() bool { + if x != nil { + return x.UpstreamOverflow + } + return false +} + +func (x *ResponseFlags) GetNoRouteFound() bool { + if x != nil { + return x.NoRouteFound + } + return false +} + +func (x *ResponseFlags) GetDelayInjected() bool { + if x != nil { + return x.DelayInjected + } + return false +} + +func (x *ResponseFlags) GetFaultInjected() bool { + if x != nil { + return x.FaultInjected + } + return false +} + +func (x *ResponseFlags) GetRateLimited() bool { + if x != nil { + return x.RateLimited + } + return false +} + +func (x *ResponseFlags) GetUnauthorizedDetails() *ResponseFlags_Unauthorized { + if x != nil { + return x.UnauthorizedDetails + } + return nil +} + +func (x 
*ResponseFlags) GetRateLimitServiceError() bool { + if x != nil { + return x.RateLimitServiceError + } + return false +} + +func (x *ResponseFlags) GetDownstreamConnectionTermination() bool { + if x != nil { + return x.DownstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamRetryLimitExceeded() bool { + if x != nil { + return x.UpstreamRetryLimitExceeded + } + return false +} + +func (x *ResponseFlags) GetStreamIdleTimeout() bool { + if x != nil { + return x.StreamIdleTimeout + } + return false +} + +func (x *ResponseFlags) GetInvalidEnvoyRequestHeaders() bool { + if x != nil { + return x.InvalidEnvoyRequestHeaders + } + return false +} + +func (x *ResponseFlags) GetDownstreamProtocolError() bool { + if x != nil { + return x.DownstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetUpstreamMaxStreamDurationReached() bool { + if x != nil { + return x.UpstreamMaxStreamDurationReached + } + return false +} + +func (x *ResponseFlags) GetResponseFromCacheFilter() bool { + if x != nil { + return x.ResponseFromCacheFilter + } + return false +} + +func (x *ResponseFlags) GetNoFilterConfigFound() bool { + if x != nil { + return x.NoFilterConfigFound + } + return false +} + +func (x *ResponseFlags) GetDurationTimeout() bool { + if x != nil { + return x.DurationTimeout + } + return false +} + +func (x *ResponseFlags) GetUpstreamProtocolError() bool { + if x != nil { + return x.UpstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetNoClusterFound() bool { + if x != nil { + return x.NoClusterFound + } + return false +} + +func (x *ResponseFlags) GetOverloadManager() bool { + if x != nil { + return x.OverloadManager + } + return false +} + +func (x *ResponseFlags) GetDnsResolutionFailure() bool { + if x != nil { + return x.DnsResolutionFailure + } + return false +} + +func (x *ResponseFlags) GetDownstreamRemoteReset() bool { + if x != nil { + return x.DownstreamRemoteReset + } + return false +} + +// Properties of a negotiated TLS connection. +// [#next-free-field: 8] +type TLSProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of TLS that was negotiated. + TlsVersion TLSProperties_TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=envoy.data.accesslog.v3.TLSProperties_TLSVersion" json:"tls_version,omitempty"` + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. “009C“ for “TLS_RSA_WITH_AES_128_GCM_SHA256“). + // + // Here it is expressed as an integer. + TlsCipherSuite *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tls_cipher_suite,json=tlsCipherSuite,proto3" json:"tls_cipher_suite,omitempty"` + // SNI hostname from handshake. + TlsSniHostname string `protobuf:"bytes,3,opt,name=tls_sni_hostname,json=tlsSniHostname,proto3" json:"tls_sni_hostname,omitempty"` + // Properties of the local certificate used to negotiate TLS. + LocalCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,4,opt,name=local_certificate_properties,json=localCertificateProperties,proto3" json:"local_certificate_properties,omitempty"` + // Properties of the peer certificate used to negotiate TLS. + PeerCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,5,opt,name=peer_certificate_properties,json=peerCertificateProperties,proto3" json:"peer_certificate_properties,omitempty"` + // The TLS session ID. 
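
As an illustration, the ResponseFlags getters above can be collapsed into a single coarse failure label. This sketch assumes the same accesslogv3 import path as the previous example; the helper package and label strings are hypothetical:

package accesslogutil // illustrative helper package, not part of the vendored file

import (
	accesslogv3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
)

// FailureLabel collapses the most common response flags into one coarse reason.
// Every getter is nil-safe, so a nil flags value (no flags recorded) yields "none".
func FailureLabel(flags *accesslogv3.ResponseFlags) string {
	switch {
	case flags.GetNoHealthyUpstream():
		return "no healthy upstream"
	case flags.GetUpstreamConnectionFailure():
		return "upstream connect failure"
	case flags.GetUpstreamRequestTimeout():
		return "upstream request timeout"
	case flags.GetUpstreamRetryLimitExceeded():
		return "retry limit exceeded"
	case flags.GetNoRouteFound():
		return "no route"
	case flags.GetRateLimited(), flags.GetRateLimitServiceError():
		return "rate limited"
	default:
		return "none"
	}
}
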
+ TlsSessionId string `protobuf:"bytes,6,opt,name=tls_session_id,json=tlsSessionId,proto3" json:"tls_session_id,omitempty"` + // The “JA3“ fingerprint when “JA3“ fingerprinting is enabled. + Ja3Fingerprint string `protobuf:"bytes,7,opt,name=ja3_fingerprint,json=ja3Fingerprint,proto3" json:"ja3_fingerprint,omitempty"` +} + +func (x *TLSProperties) Reset() { + *x = TLSProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties) ProtoMessage() {} + +func (x *TLSProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties.ProtoReflect.Descriptor instead. +func (*TLSProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5} +} + +func (x *TLSProperties) GetTlsVersion() TLSProperties_TLSVersion { + if x != nil { + return x.TlsVersion + } + return TLSProperties_VERSION_UNSPECIFIED +} + +func (x *TLSProperties) GetTlsCipherSuite() *wrapperspb.UInt32Value { + if x != nil { + return x.TlsCipherSuite + } + return nil +} + +func (x *TLSProperties) GetTlsSniHostname() string { + if x != nil { + return x.TlsSniHostname + } + return "" +} + +func (x *TLSProperties) GetLocalCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.LocalCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetPeerCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.PeerCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetTlsSessionId() string { + if x != nil { + return x.TlsSessionId + } + return "" +} + +func (x *TLSProperties) GetJa3Fingerprint() string { + if x != nil { + return x.Ja3Fingerprint + } + return "" +} + +// [#next-free-field: 16] +type HTTPRequestProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request method (RFC 7231/2616). + RequestMethod v3.RequestMethod `protobuf:"varint,1,opt,name=request_method,json=requestMethod,proto3,enum=envoy.config.core.v3.RequestMethod" json:"request_method,omitempty"` + // The scheme portion of the incoming request URI. + Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` + // HTTP/2 “:authority“ or HTTP/1.1 “Host“ header value. + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // The port of the incoming request URI + // (unused currently, as port is composed onto authority). + Port *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=port,proto3" json:"port,omitempty"` + // The path portion from the incoming request URI. + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + // Value of the “User-Agent“ request header. + UserAgent string `protobuf:"bytes,6,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` + // Value of the “Referer“ request header. 
+ Referer string `protobuf:"bytes,7,opt,name=referer,proto3" json:"referer,omitempty"` + // Value of the “X-Forwarded-For“ request header. + ForwardedFor string `protobuf:"bytes,8,opt,name=forwarded_for,json=forwardedFor,proto3" json:"forwarded_for,omitempty"` + // Value of the “X-Request-Id“ request header + // + // This header is used by Envoy to uniquely identify a request. + // It will be generated for all external requests and internal requests that + // do not already have a request ID. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Value of the “X-Envoy-Original-Path“ request header. + OriginalPath string `protobuf:"bytes,10,opt,name=original_path,json=originalPath,proto3" json:"original_path,omitempty"` + // Size of the HTTP request headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestHeadersBytes uint64 `protobuf:"varint,11,opt,name=request_headers_bytes,json=requestHeadersBytes,proto3" json:"request_headers_bytes,omitempty"` + // Size of the HTTP request body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestBodyBytes uint64 `protobuf:"varint,12,opt,name=request_body_bytes,json=requestBodyBytes,proto3" json:"request_body_bytes,omitempty"` + // Map of additional headers that have been configured to be logged. + RequestHeaders map[string]string `protobuf:"bytes,13,rep,name=request_headers,json=requestHeaders,proto3" json:"request_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Number of header bytes sent to the upstream by the http stream, including protocol overhead. + // + // This value accumulates during upstream retries. + UpstreamHeaderBytesSent uint64 `protobuf:"varint,14,opt,name=upstream_header_bytes_sent,json=upstreamHeaderBytesSent,proto3" json:"upstream_header_bytes_sent,omitempty"` + // Number of header bytes received from the downstream by the http stream, including protocol overhead. + DownstreamHeaderBytesReceived uint64 `protobuf:"varint,15,opt,name=downstream_header_bytes_received,json=downstreamHeaderBytesReceived,proto3" json:"downstream_header_bytes_received,omitempty"` +} + +func (x *HTTPRequestProperties) Reset() { + *x = HTTPRequestProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPRequestProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPRequestProperties) ProtoMessage() {} + +func (x *HTTPRequestProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPRequestProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPRequestProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6} +} + +func (x *HTTPRequestProperties) GetRequestMethod() v3.RequestMethod { + if x != nil { + return x.RequestMethod + } + return v3.RequestMethod(0) +} + +func (x *HTTPRequestProperties) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *HTTPRequestProperties) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *HTTPRequestProperties) GetPort() *wrapperspb.UInt32Value { + if x != nil { + return x.Port + } + return nil +} + +func (x *HTTPRequestProperties) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HTTPRequestProperties) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *HTTPRequestProperties) GetReferer() string { + if x != nil { + return x.Referer + } + return "" +} + +func (x *HTTPRequestProperties) GetForwardedFor() string { + if x != nil { + return x.ForwardedFor + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *HTTPRequestProperties) GetOriginalPath() string { + if x != nil { + return x.OriginalPath + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestHeadersBytes() uint64 { + if x != nil { + return x.RequestHeadersBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestBodyBytes() uint64 { + if x != nil { + return x.RequestBodyBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestHeaders() map[string]string { + if x != nil { + return x.RequestHeaders + } + return nil +} + +func (x *HTTPRequestProperties) GetUpstreamHeaderBytesSent() uint64 { + if x != nil { + return x.UpstreamHeaderBytesSent + } + return 0 +} + +func (x *HTTPRequestProperties) GetDownstreamHeaderBytesReceived() uint64 { + if x != nil { + return x.DownstreamHeaderBytesReceived + } + return 0 +} + +// [#next-free-field: 9] +type HTTPResponseProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP response code returned by Envoy. + ResponseCode *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + // Size of the HTTP response headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include protocol overhead or overhead from framing or encoding at other networking layers. + ResponseHeadersBytes uint64 `protobuf:"varint,2,opt,name=response_headers_bytes,json=responseHeadersBytes,proto3" json:"response_headers_bytes,omitempty"` + // Size of the HTTP response body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + ResponseBodyBytes uint64 `protobuf:"varint,3,opt,name=response_body_bytes,json=responseBodyBytes,proto3" json:"response_body_bytes,omitempty"` + // Map of additional headers configured to be logged. + ResponseHeaders map[string]string `protobuf:"bytes,4,rep,name=response_headers,json=responseHeaders,proto3" json:"response_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map of trailers configured to be logged. 
+ ResponseTrailers map[string]string `protobuf:"bytes,5,rep,name=response_trailers,json=responseTrailers,proto3" json:"response_trailers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The HTTP response code details. + ResponseCodeDetails string `protobuf:"bytes,6,opt,name=response_code_details,json=responseCodeDetails,proto3" json:"response_code_details,omitempty"` + // Number of header bytes received from the upstream by the http stream, including protocol overhead. + UpstreamHeaderBytesReceived uint64 `protobuf:"varint,7,opt,name=upstream_header_bytes_received,json=upstreamHeaderBytesReceived,proto3" json:"upstream_header_bytes_received,omitempty"` + // Number of header bytes sent to the downstream by the http stream, including protocol overhead. + DownstreamHeaderBytesSent uint64 `protobuf:"varint,8,opt,name=downstream_header_bytes_sent,json=downstreamHeaderBytesSent,proto3" json:"downstream_header_bytes_sent,omitempty"` +} + +func (x *HTTPResponseProperties) Reset() { + *x = HTTPResponseProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPResponseProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPResponseProperties) ProtoMessage() {} + +func (x *HTTPResponseProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPResponseProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPResponseProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} +} + +func (x *HTTPResponseProperties) GetResponseCode() *wrapperspb.UInt32Value { + if x != nil { + return x.ResponseCode + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseHeadersBytes() uint64 { + if x != nil { + return x.ResponseHeadersBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseBodyBytes() uint64 { + if x != nil { + return x.ResponseBodyBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseHeaders() map[string]string { + if x != nil { + return x.ResponseHeaders + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseTrailers() map[string]string { + if x != nil { + return x.ResponseTrailers + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseCodeDetails() string { + if x != nil { + return x.ResponseCodeDetails + } + return "" +} + +func (x *HTTPResponseProperties) GetUpstreamHeaderBytesReceived() uint64 { + if x != nil { + return x.UpstreamHeaderBytesReceived + } + return 0 +} + +func (x *HTTPResponseProperties) GetDownstreamHeaderBytesSent() uint64 { + if x != nil { + return x.DownstreamHeaderBytesSent + } + return 0 +} + +type ResponseFlags_Unauthorized struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason ResponseFlags_Unauthorized_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=envoy.data.accesslog.v3.ResponseFlags_Unauthorized_Reason" json:"reason,omitempty"` +} + +func (x *ResponseFlags_Unauthorized) Reset() { + *x = ResponseFlags_Unauthorized{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags_Unauthorized) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlags_Unauthorized) ProtoMessage() {} + +func (x *ResponseFlags_Unauthorized) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized.ProtoReflect.Descriptor instead. +func (*ResponseFlags_Unauthorized) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ResponseFlags_Unauthorized) GetReason() ResponseFlags_Unauthorized_Reason { + if x != nil { + return x.Reason + } + return ResponseFlags_Unauthorized_REASON_UNSPECIFIED +} + +type TLSProperties_CertificateProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SANs present in the certificate. + SubjectAltName []*TLSProperties_CertificateProperties_SubjectAltName `protobuf:"bytes,1,rep,name=subject_alt_name,json=subjectAltName,proto3" json:"subject_alt_name,omitempty"` + // The subject field of the certificate. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // The issuer field of the certificate. 
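
Likewise, a small sketch of reading the HTTPRequestProperties and HTTPResponseProperties getters above, under the same assumed import path; the function name and output format are hypothetical:

package accesslogutil

import (
	"fmt"

	accesslogv3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
)

// SummarizeHTTP renders one logged HTTP exchange using the nil-safe getters
// defined above. GetResponseCode returns a *wrapperspb.UInt32Value, whose
// GetValue is also nil-safe, so a missing response code simply reads as 0.
func SummarizeHTTP(req *accesslogv3.HTTPRequestProperties, resp *accesslogv3.HTTPResponseProperties) string {
	return fmt.Sprintf("%v %s://%s%s -> %d (%s)",
		req.GetRequestMethod(), // envoy.config.core.v3.RequestMethod enum
		req.GetScheme(),
		req.GetAuthority(),
		req.GetPath(),
		resp.GetResponseCode().GetValue(),
		resp.GetResponseCodeDetails(),
	)
}
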
+ Issuer string `protobuf:"bytes,3,opt,name=issuer,proto3" json:"issuer,omitempty"` +} + +func (x *TLSProperties_CertificateProperties) Reset() { + *x = TLSProperties_CertificateProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties.ProtoReflect.Descriptor instead. +func (*TLSProperties_CertificateProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *TLSProperties_CertificateProperties) GetSubjectAltName() []*TLSProperties_CertificateProperties_SubjectAltName { + if x != nil { + return x.SubjectAltName + } + return nil +} + +func (x *TLSProperties_CertificateProperties) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *TLSProperties_CertificateProperties) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +type TLSProperties_CertificateProperties_SubjectAltName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to San: + // + // *TLSProperties_CertificateProperties_SubjectAltName_Uri + // *TLSProperties_CertificateProperties_SubjectAltName_Dns + San isTLSProperties_CertificateProperties_SubjectAltName_San `protobuf_oneof:"san"` +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) Reset() { + *x = TLSProperties_CertificateProperties_SubjectAltName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties_SubjectAltName) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties_SubjectAltName.ProtoReflect.Descriptor instead. 
+func (*TLSProperties_CertificateProperties_SubjectAltName) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) GetSan() isTLSProperties_CertificateProperties_SubjectAltName_San { + if m != nil { + return m.San + } + return nil +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetUri() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + return x.Uri + } + return "" +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetDns() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + return x.Dns + } + return "" +} + +type isTLSProperties_CertificateProperties_SubjectAltName_San interface { + isTLSProperties_CertificateProperties_SubjectAltName_San() +} + +type TLSProperties_CertificateProperties_SubjectAltName_Uri struct { + Uri string `protobuf:"bytes,1,opt,name=uri,proto3,oneof"` +} + +type TLSProperties_CertificateProperties_SubjectAltName_Dns struct { + // [#not-implemented-hide:] + Dns string `protobuf:"bytes,2,opt,name=dns,proto3,oneof"` +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Uri) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Dns) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +var File_envoy_data_accesslog_v3_accesslog_proto protoreflect.FileDescriptor + +var file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 
0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x10, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x62, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x14, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xf0, 0x03, 0x0a, 0x12, 0x48, 0x54, 0x54, 0x50, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, + 0x11, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 
0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x55, 0x0a, 0x0b, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, + 0x31, 0x30, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, 0x31, 0x31, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x33, 0x10, 0x04, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x91, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x6e, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, + 0x65, 0x6e, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, + 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x15, + 0x0a, 0x0f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x17, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x57, 0x0a, 0x18, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 
0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x16, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, + 0x0d, 0x74, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x14, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x78, + 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, + 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, + 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, + 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, + 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 
0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x74, + 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5e, 0x0a, 0x1f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x17, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x53, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x4d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 
0x61, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x21, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x66, 0x0a, 0x20, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, + 0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x72, + 0x0a, 0x14, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x59, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x35, 0x0a, + 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 
0x74, + 0x65, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x16, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x4d, 0x0a, 0x23, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x64, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x1a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1d, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, + 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x1e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, + 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x37, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x0f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 
0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x5b, 0x0a, 0x17, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xa1, 0x0e, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x6e, 0x6f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x12, 0x38, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x12, 0x3e, 0x0a, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x46, 0x0a, 0x1f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x1d, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x76, + 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x49, 0x6e, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x69, 0x6e, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x12, 0x66, 0x0a, + 0x14, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, + 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x52, 0x13, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4a, + 0x0a, 0x21, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x2e, 0x0a, + 0x13, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 
0x75, 0x74, 0x12, 0x41, 0x0a, + 0x1d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x3a, 0x0a, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x24, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x1a, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x16, 0x6e, 0x6f, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6e, 0x6f, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x29, + 0x0a, 0x10, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 
0x74, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x1a, 0xd5, 0x01, 0x0a, 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x52, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, + 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, + 0x01, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, + 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, + 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xc5, 0x08, 0x0a, 0x0d, 0x54, + 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x53, 0x6e, 0x69, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x7e, 0x0a, 0x1c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 
0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x1b, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x19, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6a, 0x61, 0x33, 0x5f, 0x66, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6a, 0x61, 0x33, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x1a, + 0x99, 0x03, 0x0a, 0x15, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x1a, 0x92, 0x01, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x3a, 0x51, 0x9a, + 0xc5, 0x88, 0x1e, 0x4c, 0x0a, 0x4a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 
0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x42, 0x05, 0x0a, 0x03, 0x73, 0x61, 0x6e, 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, 0x0a, 0x0a, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x56, 0x45, 0x52, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, + 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xd9, 0x06, 0x0a, 0x15, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, + 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 
0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x3b, 0x0a, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x20, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, + 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa0, + 0x06, 0x0a, 0x16, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, + 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0x72, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, + 0x74, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, + 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2a, 0xcb, 0x02, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x53, 0x65, 0x74, 0x10, 0x00, 0x12, + 0x18, 0x0a, 0x14, 0x54, 0x63, 0x70, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x63, 0x70, + 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x63, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x05, 0x12, 0x11, 0x0a, + 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x6f, 0x6c, + 0x52, 0x65, 0x61, 0x64, 0x79, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x08, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x09, 0x12, 0x2b, + 0x0a, 0x27, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x75, 0x6e, 0x6e, + 0x65, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, + 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x10, 0x0a, 0x12, 0x1e, 0x0a, 0x1a, 0x55, + 0x64, 0x70, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0x0b, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x64, 0x70, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x0c, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x64, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x10, 0x0d, 0x42, + 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 
0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce sync.Once + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = file_envoy_data_accesslog_v3_accesslog_proto_rawDesc +) + +func file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_data_accesslog_v3_accesslog_proto_rawDescData) + }) + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescData +} + +var file_envoy_data_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_envoy_data_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_data_accesslog_v3_accesslog_proto_goTypes = []interface{}{ + (AccessLogType)(0), // 0: envoy.data.accesslog.v3.AccessLogType + (HTTPAccessLogEntry_HTTPVersion)(0), // 1: envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + (ResponseFlags_Unauthorized_Reason)(0), // 2: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + (TLSProperties_TLSVersion)(0), // 3: envoy.data.accesslog.v3.TLSProperties.TLSVersion + (*TCPAccessLogEntry)(nil), // 4: envoy.data.accesslog.v3.TCPAccessLogEntry + (*HTTPAccessLogEntry)(nil), // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry + (*ConnectionProperties)(nil), // 6: envoy.data.accesslog.v3.ConnectionProperties + (*AccessLogCommon)(nil), // 7: envoy.data.accesslog.v3.AccessLogCommon + (*ResponseFlags)(nil), // 8: envoy.data.accesslog.v3.ResponseFlags + (*TLSProperties)(nil), // 9: envoy.data.accesslog.v3.TLSProperties + (*HTTPRequestProperties)(nil), // 10: envoy.data.accesslog.v3.HTTPRequestProperties + (*HTTPResponseProperties)(nil), // 11: envoy.data.accesslog.v3.HTTPResponseProperties + nil, // 12: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + nil, // 13: envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + (*ResponseFlags_Unauthorized)(nil), // 14: envoy.data.accesslog.v3.ResponseFlags.Unauthorized + (*TLSProperties_CertificateProperties)(nil), // 15: envoy.data.accesslog.v3.TLSProperties.CertificateProperties + (*TLSProperties_CertificateProperties_SubjectAltName)(nil), // 16: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + nil, // 17: envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + nil, // 18: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + nil, // 19: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + (*v3.Address)(nil), // 20: envoy.config.core.v3.Address + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*v3.Metadata)(nil), // 23: envoy.config.core.v3.Metadata + (*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value + (v3.RequestMethod)(0), // 25: envoy.config.core.v3.RequestMethod + (*anypb.Any)(nil), // 26: google.protobuf.Any +} +var file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = []int32{ + 7, // 0: envoy.data.accesslog.v3.TCPAccessLogEntry.common_properties:type_name -> 
envoy.data.accesslog.v3.AccessLogCommon + 6, // 1: envoy.data.accesslog.v3.TCPAccessLogEntry.connection_properties:type_name -> envoy.data.accesslog.v3.ConnectionProperties + 7, // 2: envoy.data.accesslog.v3.HTTPAccessLogEntry.common_properties:type_name -> envoy.data.accesslog.v3.AccessLogCommon + 1, // 3: envoy.data.accesslog.v3.HTTPAccessLogEntry.protocol_version:type_name -> envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + 10, // 4: envoy.data.accesslog.v3.HTTPAccessLogEntry.request:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties + 11, // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry.response:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties + 20, // 6: envoy.data.accesslog.v3.AccessLogCommon.downstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 7: envoy.data.accesslog.v3.AccessLogCommon.downstream_local_address:type_name -> envoy.config.core.v3.Address + 9, // 8: envoy.data.accesslog.v3.AccessLogCommon.tls_properties:type_name -> envoy.data.accesslog.v3.TLSProperties + 21, // 9: envoy.data.accesslog.v3.AccessLogCommon.start_time:type_name -> google.protobuf.Timestamp + 22, // 10: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_rx_byte:type_name -> google.protobuf.Duration + 22, // 11: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 12: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 13: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 14: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 15: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_downstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 16: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_downstream_tx_byte:type_name -> google.protobuf.Duration + 20, // 17: envoy.data.accesslog.v3.AccessLogCommon.upstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 18: envoy.data.accesslog.v3.AccessLogCommon.upstream_local_address:type_name -> envoy.config.core.v3.Address + 8, // 19: envoy.data.accesslog.v3.AccessLogCommon.response_flags:type_name -> envoy.data.accesslog.v3.ResponseFlags + 23, // 20: envoy.data.accesslog.v3.AccessLogCommon.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 21: envoy.data.accesslog.v3.AccessLogCommon.downstream_direct_remote_address:type_name -> envoy.config.core.v3.Address + 12, // 22: envoy.data.accesslog.v3.AccessLogCommon.filter_state_objects:type_name -> envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + 13, // 23: envoy.data.accesslog.v3.AccessLogCommon.custom_tags:type_name -> envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + 22, // 24: envoy.data.accesslog.v3.AccessLogCommon.duration:type_name -> google.protobuf.Duration + 0, // 25: envoy.data.accesslog.v3.AccessLogCommon.access_log_type:type_name -> envoy.data.accesslog.v3.AccessLogType + 14, // 26: envoy.data.accesslog.v3.ResponseFlags.unauthorized_details:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized + 3, // 27: envoy.data.accesslog.v3.TLSProperties.tls_version:type_name -> envoy.data.accesslog.v3.TLSProperties.TLSVersion + 24, // 28: envoy.data.accesslog.v3.TLSProperties.tls_cipher_suite:type_name -> google.protobuf.UInt32Value + 15, // 29: envoy.data.accesslog.v3.TLSProperties.local_certificate_properties:type_name -> 
envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 15, // 30: envoy.data.accesslog.v3.TLSProperties.peer_certificate_properties:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 25, // 31: envoy.data.accesslog.v3.HTTPRequestProperties.request_method:type_name -> envoy.config.core.v3.RequestMethod + 24, // 32: envoy.data.accesslog.v3.HTTPRequestProperties.port:type_name -> google.protobuf.UInt32Value + 17, // 33: envoy.data.accesslog.v3.HTTPRequestProperties.request_headers:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + 24, // 34: envoy.data.accesslog.v3.HTTPResponseProperties.response_code:type_name -> google.protobuf.UInt32Value + 18, // 35: envoy.data.accesslog.v3.HTTPResponseProperties.response_headers:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + 19, // 36: envoy.data.accesslog.v3.HTTPResponseProperties.response_trailers:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + 26, // 37: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry.value:type_name -> google.protobuf.Any + 2, // 38: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.reason:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + 16, // 39: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.subject_alt_name:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 40, // [40:40] is the sub-list for extension extendee + 0, // [0:40] is the sub-list for field type_name +} + +func init() { file_envoy_data_accesslog_v3_accesslog_proto_init() } +func file_envoy_data_accesslog_v3_accesslog_proto_init() { + if File_envoy_data_accesslog_v3_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLogCommon); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPRequestProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPResponseProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags_Unauthorized); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties_SubjectAltName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*TLSProperties_CertificateProperties_SubjectAltName_Uri)(nil), + (*TLSProperties_CertificateProperties_SubjectAltName_Dns)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_data_accesslog_v3_accesslog_proto_rawDesc, + NumEnums: 4, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_data_accesslog_v3_accesslog_proto_goTypes, + DependencyIndexes: file_envoy_data_accesslog_v3_accesslog_proto_depIdxs, + EnumInfos: file_envoy_data_accesslog_v3_accesslog_proto_enumTypes, + MessageInfos: file_envoy_data_accesslog_v3_accesslog_proto_msgTypes, + }.Build() + File_envoy_data_accesslog_v3_accesslog_proto = out.File + file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = nil + file_envoy_data_accesslog_v3_accesslog_proto_goTypes = nil + file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go new file mode 100644 index 000000000..7a0fec615 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go @@ -0,0 +1,2257 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RequestMethod(0) +) + +// Validate checks the field values on TCPAccessLogEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TCPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TCPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TCPAccessLogEntryMultiError, or nil if none found. +func (m *TCPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *TCPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TCPAccessLogEntryMultiError(errors) + } + + return nil +} + +// TCPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by TCPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type TCPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TCPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TCPAccessLogEntryMultiError) AllErrors() []error { return m } + +// TCPAccessLogEntryValidationError is the validation error returned by +// TCPAccessLogEntry.Validate if the designated constraints aren't met. +type TCPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TCPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TCPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TCPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TCPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TCPAccessLogEntryValidationError) ErrorName() string { + return "TCPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e TCPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTCPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TCPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TCPAccessLogEntryValidationError{} + +// Validate checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPAccessLogEntryMultiError, or nil if none found. 
+func (m *HTTPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProtocolVersion + + if all { + switch v := interface{}(m.GetRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetResponse()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponse()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HTTPAccessLogEntryMultiError(errors) + } + + return nil +} + +// HTTPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by HTTPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type HTTPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPAccessLogEntryMultiError) AllErrors() []error { return m } + +// HTTPAccessLogEntryValidationError is the validation error returned by +// HTTPAccessLogEntry.Validate if the designated constraints aren't met. 
+type HTTPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPAccessLogEntryValidationError) ErrorName() string { + return "HTTPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPAccessLogEntryValidationError{} + +// Validate checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConnectionProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConnectionPropertiesMultiError, or nil if none found. +func (m *ConnectionProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *ConnectionProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ReceivedBytes + + // no validation rules for SentBytes + + if len(errors) > 0 { + return ConnectionPropertiesMultiError(errors) + } + + return nil +} + +// ConnectionPropertiesMultiError is an error wrapping multiple validation +// errors returned by ConnectionProperties.ValidateAll() if the designated +// constraints aren't met. +type ConnectionPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConnectionPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConnectionPropertiesMultiError) AllErrors() []error { return m } + +// ConnectionPropertiesValidationError is the validation error returned by +// ConnectionProperties.Validate if the designated constraints aren't met. +type ConnectionPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConnectionPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConnectionPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConnectionPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ConnectionPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConnectionPropertiesValidationError) ErrorName() string { + return "ConnectionPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e ConnectionPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConnectionProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConnectionPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConnectionPropertiesValidationError{} + +// Validate checks the field values on AccessLogCommon with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AccessLogCommon) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLogCommon with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AccessLogCommonMultiError, or nil if none found. +func (m *AccessLogCommon) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLogCommon) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetSampleRate(); val <= 0 || val > 1 { + err := AccessLogCommonValidationError{ + field: "SampleRate", + reason: "value must be inside range (0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDownstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDownstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetTlsProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStartTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStartTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", 
+ reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamCluster + + if all { + switch v := interface{}(m.GetResponseFlags()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseFlags()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamTransportFailureReason + + // no validation rules for RouteName + + if all { + switch v := interface{}(m.GetDownstreamDirectRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamDirectRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetFilterStateObjects())) + i := 0 + for key := range m.GetFilterStateObjects() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilterStateObjects()[key] + _ = val + + // no validation rules for FilterStateObjects[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + // no validation rules for CustomTags + + if all { + switch v := interface{}(m.GetDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for 
UpstreamRequestAttemptCount + + // no validation rules for ConnectionTerminationDetails + + // no validation rules for StreamId + + // no validation rules for IntermediateLogEntry + + // no validation rules for DownstreamTransportFailureReason + + // no validation rules for DownstreamWireBytesSent + + // no validation rules for DownstreamWireBytesReceived + + // no validation rules for UpstreamWireBytesSent + + // no validation rules for UpstreamWireBytesReceived + + // no validation rules for AccessLogType + + if len(errors) > 0 { + return AccessLogCommonMultiError(errors) + } + + return nil +} + +// AccessLogCommonMultiError is an error wrapping multiple validation errors +// returned by AccessLogCommon.ValidateAll() if the designated constraints +// aren't met. +type AccessLogCommonMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogCommonMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogCommonMultiError) AllErrors() []error { return m } + +// AccessLogCommonValidationError is the validation error returned by +// AccessLogCommon.Validate if the designated constraints aren't met. +type AccessLogCommonValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogCommonValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogCommonValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogCommonValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogCommonValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AccessLogCommonValidationError) ErrorName() string { return "AccessLogCommonValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogCommonValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLogCommon.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogCommonValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogCommonValidationError{} + +// Validate checks the field values on ResponseFlags with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResponseFlagsMultiError, or +// nil if none found. 
+func (m *ResponseFlags) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FailedLocalHealthcheck + + // no validation rules for NoHealthyUpstream + + // no validation rules for UpstreamRequestTimeout + + // no validation rules for LocalReset + + // no validation rules for UpstreamRemoteReset + + // no validation rules for UpstreamConnectionFailure + + // no validation rules for UpstreamConnectionTermination + + // no validation rules for UpstreamOverflow + + // no validation rules for NoRouteFound + + // no validation rules for DelayInjected + + // no validation rules for FaultInjected + + // no validation rules for RateLimited + + if all { + switch v := interface{}(m.GetUnauthorizedDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUnauthorizedDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RateLimitServiceError + + // no validation rules for DownstreamConnectionTermination + + // no validation rules for UpstreamRetryLimitExceeded + + // no validation rules for StreamIdleTimeout + + // no validation rules for InvalidEnvoyRequestHeaders + + // no validation rules for DownstreamProtocolError + + // no validation rules for UpstreamMaxStreamDurationReached + + // no validation rules for ResponseFromCacheFilter + + // no validation rules for NoFilterConfigFound + + // no validation rules for DurationTimeout + + // no validation rules for UpstreamProtocolError + + // no validation rules for NoClusterFound + + // no validation rules for OverloadManager + + // no validation rules for DnsResolutionFailure + + // no validation rules for DownstreamRemoteReset + + if len(errors) > 0 { + return ResponseFlagsMultiError(errors) + } + + return nil +} + +// ResponseFlagsMultiError is an error wrapping multiple validation errors +// returned by ResponseFlags.ValidateAll() if the designated constraints +// aren't met. +type ResponseFlagsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlagsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlagsMultiError) AllErrors() []error { return m } + +// ResponseFlagsValidationError is the validation error returned by +// ResponseFlags.Validate if the designated constraints aren't met. +type ResponseFlagsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlagsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ResponseFlagsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlagsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlagsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlagsValidationError) ErrorName() string { return "ResponseFlagsValidationError" } + +// Error satisfies the builtin error interface +func (e ResponseFlagsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlagsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlagsValidationError{} + +// Validate checks the field values on TLSProperties with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TLSProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TLSPropertiesMultiError, or +// nil if none found. +func (m *TLSProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TlsVersion + + if all { + switch v := interface{}(m.GetTlsCipherSuite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCipherSuite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSniHostname + + if all { + switch v := interface{}(m.GetLocalCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetPeerCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPeerCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSessionId + + // no validation rules for Ja3Fingerprint + + if len(errors) > 0 { + return TLSPropertiesMultiError(errors) + } + + return nil +} + +// TLSPropertiesMultiError is an error wrapping multiple validation errors +// returned by TLSProperties.ValidateAll() if the designated constraints +// aren't met. +type TLSPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSPropertiesMultiError) AllErrors() []error { return m } + +// TLSPropertiesValidationError is the validation error returned by +// TLSProperties.Validate if the designated constraints aren't met. +type TLSPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSPropertiesValidationError) ErrorName() string { return "TLSPropertiesValidationError" } + +// Error satisfies the builtin error interface +func (e TLSPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSPropertiesValidationError{} + +// Validate checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPRequestProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPRequestPropertiesMultiError, or nil if none found. +func (m *HTTPRequestProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPRequestProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := v3.RequestMethod_name[int32(m.GetRequestMethod())]; !ok { + err := HTTPRequestPropertiesValidationError{ + field: "RequestMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Scheme + + // no validation rules for Authority + + if all { + switch v := interface{}(m.GetPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Path + + // no validation rules for UserAgent + + // no validation rules for Referer + + // no validation rules for ForwardedFor + + // no validation rules for RequestId + + // no validation rules for OriginalPath + + // no validation rules for RequestHeadersBytes + + // no validation rules for RequestBodyBytes + + // no validation rules for RequestHeaders + + // no validation rules for UpstreamHeaderBytesSent + + // no validation rules for DownstreamHeaderBytesReceived + + if len(errors) > 0 { + return HTTPRequestPropertiesMultiError(errors) + } + + return nil +} + +// HTTPRequestPropertiesMultiError is an error wrapping multiple validation +// errors returned by HTTPRequestProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPRequestPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPRequestPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPRequestPropertiesMultiError) AllErrors() []error { return m } + +// HTTPRequestPropertiesValidationError is the validation error returned by +// HTTPRequestProperties.Validate if the designated constraints aren't met. +type HTTPRequestPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPRequestPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPRequestPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPRequestPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPRequestPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HTTPRequestPropertiesValidationError) ErrorName() string { + return "HTTPRequestPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPRequestPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPRequestProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPRequestPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPRequestPropertiesValidationError{} + +// Validate checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPResponseProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPResponsePropertiesMultiError, or nil if none found. +func (m *HTTPResponseProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPResponseProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResponseCode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseCode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ResponseHeadersBytes + + // no validation rules for ResponseBodyBytes + + // no validation rules for ResponseHeaders + + // no validation rules for ResponseTrailers + + // no validation rules for ResponseCodeDetails + + // no validation rules for UpstreamHeaderBytesReceived + + // no validation rules for DownstreamHeaderBytesSent + + if len(errors) > 0 { + return HTTPResponsePropertiesMultiError(errors) + } + + return nil +} + +// HTTPResponsePropertiesMultiError is an error wrapping multiple validation +// errors returned by HTTPResponseProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPResponsePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPResponsePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HTTPResponsePropertiesMultiError) AllErrors() []error { return m } + +// HTTPResponsePropertiesValidationError is the validation error returned by +// HTTPResponseProperties.Validate if the designated constraints aren't met. +type HTTPResponsePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPResponsePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPResponsePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPResponsePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPResponsePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPResponsePropertiesValidationError) ErrorName() string { + return "HTTPResponsePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPResponsePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPResponseProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPResponsePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPResponsePropertiesValidationError{} + +// Validate checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags_Unauthorized) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResponseFlags_UnauthorizedMultiError, or nil if none found. +func (m *ResponseFlags_Unauthorized) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags_Unauthorized) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Reason + + if len(errors) > 0 { + return ResponseFlags_UnauthorizedMultiError(errors) + } + + return nil +} + +// ResponseFlags_UnauthorizedMultiError is an error wrapping multiple +// validation errors returned by ResponseFlags_Unauthorized.ValidateAll() if +// the designated constraints aren't met. +type ResponseFlags_UnauthorizedMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlags_UnauthorizedMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlags_UnauthorizedMultiError) AllErrors() []error { return m } + +// ResponseFlags_UnauthorizedValidationError is the validation error returned +// by ResponseFlags_Unauthorized.Validate if the designated constraints aren't met. +type ResponseFlags_UnauthorizedValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResponseFlags_UnauthorizedValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResponseFlags_UnauthorizedValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlags_UnauthorizedValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlags_UnauthorizedValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlags_UnauthorizedValidationError) ErrorName() string { + return "ResponseFlags_UnauthorizedValidationError" +} + +// Error satisfies the builtin error interface +func (e ResponseFlags_UnauthorizedValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags_Unauthorized.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlags_UnauthorizedValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlags_UnauthorizedValidationError{} + +// Validate checks the field values on TLSProperties_CertificateProperties with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *TLSProperties_CertificateProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties_CertificateProperties +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// TLSProperties_CertificatePropertiesMultiError, or nil if none found. +func (m *TLSProperties_CertificateProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSubjectAltName() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Subject + + // no validation rules for Issuer + + if len(errors) > 0 { + return TLSProperties_CertificatePropertiesMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificatePropertiesMultiError is an error wrapping multiple +// validation errors returned by +// TLSProperties_CertificateProperties.ValidateAll() if the designated +// constraints aren't met. 
+type TLSProperties_CertificatePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificatePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSProperties_CertificatePropertiesMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificatePropertiesValidationError is the validation error +// returned by TLSProperties_CertificateProperties.Validate if the designated +// constraints aren't met. +type TLSProperties_CertificatePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificatePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSProperties_CertificatePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSProperties_CertificatePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSProperties_CertificatePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSProperties_CertificatePropertiesValidationError) ErrorName() string { + return "TLSProperties_CertificatePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificatePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificatePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificatePropertiesValidationError{} + +// Validate checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TLSProperties_CertificateProperties_SubjectAltName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// TLSProperties_CertificateProperties_SubjectAltNameMultiError, or nil if +// none found. 
+func (m *TLSProperties_CertificateProperties_SubjectAltName) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.San.(type) { + case *TLSProperties_CertificateProperties_SubjectAltName_Uri: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Uri + case *TLSProperties_CertificateProperties_SubjectAltName_Dns: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Dns + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TLSProperties_CertificateProperties_SubjectAltNameMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificateProperties_SubjectAltNameMultiError is an error +// wrapping multiple validation errors returned by +// TLSProperties_CertificateProperties_SubjectAltName.ValidateAll() if the +// designated constraints aren't met. +type TLSProperties_CertificateProperties_SubjectAltNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificateProperties_SubjectAltNameValidationError is the +// validation error returned by +// TLSProperties_CertificateProperties_SubjectAltName.Validate if the +// designated constraints aren't met. +type TLSProperties_CertificateProperties_SubjectAltNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) ErrorName() string { + return "TLSProperties_CertificateProperties_SubjectAltNameValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties_SubjectAltName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go new file mode 100644 index 000000000..c37ec0915 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go @@ -0,0 +1,2040 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TCPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TCPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Response != nil { + size, err := m.Response.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SentBytes != 
0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SentBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ReceivedBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReceivedBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AccessLogCommon) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogCommon) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogCommon) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessLogType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AccessLogType)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesReceived)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.UpstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.DownstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesReceived)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.DownstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if len(m.DownstreamTransportFailureReason) > 0 { + i -= len(m.DownstreamTransportFailureReason) + copy(dAtA[i:], m.DownstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.IntermediateLogEntry { + i-- + if m.IntermediateLogEntry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if len(m.StreamId) > 0 { + i -= len(m.StreamId) + copy(dAtA[i:], m.StreamId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StreamId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.ConnectionTerminationDetails) > 0 { + i -= len(m.ConnectionTerminationDetails) + copy(dAtA[i:], m.ConnectionTerminationDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionTerminationDetails))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.UpstreamRequestAttemptCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamRequestAttemptCount)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.Duration != nil { + size, err := (*durationpb.Duration)(m.Duration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CustomTags) > 0 { + for k := range m.CustomTags { + v := m.CustomTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if 
len(m.FilterStateObjects) > 0 { + for k := range m.FilterStateObjects { + v := m.FilterStateObjects[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.DownstreamDirectRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamDirectRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if len(m.RouteName) > 0 { + i -= len(m.RouteName) + copy(dAtA[i:], m.RouteName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.UpstreamTransportFailureReason) > 0 { + i -= len(m.UpstreamTransportFailureReason) + copy(dAtA[i:], m.UpstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ResponseFlags != nil { + size, err := m.ResponseFlags.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x7a + } + if m.UpstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.UpstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, 
i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.TimeToLastDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TimeToFirstDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.TimeToLastUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.TimeToFirstUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.TimeToLastUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeToFirstUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TimeToLastRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.StartTime != nil { + size, err := (*timestamppb.Timestamp)(m.StartTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.TlsProperties != nil { + size, err := m.TlsProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DownstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.DownstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SampleRate != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleRate)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags_Unauthorized) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags_Unauthorized) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags_Unauthorized) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Reason != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Reason)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamRemoteReset { + i-- + if m.DownstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.DnsResolutionFailure { + i-- + if m.DnsResolutionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.OverloadManager { + i-- + if m.OverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.NoClusterFound { + i-- + if m.NoClusterFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.UpstreamProtocolError { + i-- + if m.UpstreamProtocolError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.DurationTimeout { + i-- + if m.DurationTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.NoFilterConfigFound { + i-- + if m.NoFilterConfigFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.ResponseFromCacheFilter { + i-- + if m.ResponseFromCacheFilter { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.UpstreamMaxStreamDurationReached { + i-- + if m.UpstreamMaxStreamDurationReached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.DownstreamProtocolError { + i-- + if m.DownstreamProtocolError { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.InvalidEnvoyRequestHeaders { + i-- + if m.InvalidEnvoyRequestHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.StreamIdleTimeout { + i-- + if m.StreamIdleTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamRetryLimitExceeded { + i-- + if m.UpstreamRetryLimitExceeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DownstreamConnectionTermination { + i-- + if m.DownstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.RateLimitServiceError { + i-- + if m.RateLimitServiceError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.UnauthorizedDetails != nil { + size, err := m.UnauthorizedDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.RateLimited { + i-- + if m.RateLimited { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.FaultInjected { + i-- + if m.FaultInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DelayInjected { + i-- + if m.DelayInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NoRouteFound { + i-- + if m.NoRouteFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.UpstreamOverflow { + i-- + if m.UpstreamOverflow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.UpstreamConnectionTermination { + i-- + if m.UpstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.UpstreamConnectionFailure { + i-- + if m.UpstreamConnectionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.UpstreamRemoteReset { + i-- + if m.UpstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalReset { + i-- + if m.LocalReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.UpstreamRequestTimeout { + i-- + if m.UpstreamRequestTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.NoHealthyUpstream { + i-- + if m.NoHealthyUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedLocalHealthcheck { + i-- + if m.FailedLocalHealthcheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := 
m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Issuer) > 0 { + i -= len(m.Issuer) + copy(dAtA[i:], m.Issuer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Issuer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subject) > 0 { + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x12 + } + if len(m.SubjectAltName) > 0 { + for iNdEx := len(m.SubjectAltName) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltName[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ja3Fingerprint) > 0 { + i -= 
len(m.Ja3Fingerprint) + copy(dAtA[i:], m.Ja3Fingerprint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ja3Fingerprint))) + i-- + dAtA[i] = 0x3a + } + if len(m.TlsSessionId) > 0 { + i -= len(m.TlsSessionId) + copy(dAtA[i:], m.TlsSessionId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSessionId))) + i-- + dAtA[i] = 0x32 + } + if m.PeerCertificateProperties != nil { + size, err := m.PeerCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalCertificateProperties != nil { + size, err := m.LocalCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TlsSniHostname) > 0 { + i -= len(m.TlsSniHostname) + copy(dAtA[i:], m.TlsSniHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSniHostname))) + i-- + dAtA[i] = 0x1a + } + if m.TlsCipherSuite != nil { + size, err := (*wrapperspb.UInt32Value)(m.TlsCipherSuite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.TlsVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPRequestProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRequestProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPRequestProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x78 + } + if m.UpstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeaders) > 0 { + for k := range m.RequestHeaders { + v := m.RequestHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if m.RequestBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestBodyBytes)) + i-- + dAtA[i] = 0x60 + } + if m.RequestHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestHeadersBytes)) + i-- + dAtA[i] = 0x58 + } + if len(m.OriginalPath) > 0 { + i -= len(m.OriginalPath) + copy(dAtA[i:], m.OriginalPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OriginalPath))) + i-- + dAtA[i] = 0x52 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0x4a + } + if len(m.ForwardedFor) > 0 { + i 
-= len(m.ForwardedFor) + copy(dAtA[i:], m.ForwardedFor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ForwardedFor))) + i-- + dAtA[i] = 0x42 + } + if len(m.Referer) > 0 { + i -= len(m.Referer) + copy(dAtA[i:], m.Referer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Referer))) + i-- + dAtA[i] = 0x3a + } + if len(m.UserAgent) > 0 { + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x32 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x2a + } + if m.Port != nil { + size, err := (*wrapperspb.UInt32Value)(m.Port).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x1a + } + if len(m.Scheme) > 0 { + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scheme))) + i-- + dAtA[i] = 0x12 + } + if m.RequestMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestMethod)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPResponseProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPResponseProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPResponseProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x40 + } + if m.UpstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x38 + } + if len(m.ResponseCodeDetails) > 0 { + i -= len(m.ResponseCodeDetails) + copy(dAtA[i:], m.ResponseCodeDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseCodeDetails))) + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseTrailers) > 0 { + for k := range m.ResponseTrailers { + v := m.ResponseTrailers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeaders) > 0 { + for k := range m.ResponseHeaders { + v := m.ResponseHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.ResponseBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(m.ResponseBodyBytes)) + i-- + dAtA[i] = 0x18 + } + if m.ResponseHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseHeadersBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ResponseCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.ResponseCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TCPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ProtocolVersion)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Response != nil { + l = m.Response.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReceivedBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ReceivedBytes)) + } + if m.SentBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SentBytes)) + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogCommon) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleRate != 0 { + n += 9 + } + if m.DownstreamRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamLocalAddress != nil { + if size, ok := interface{}(m.DownstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsProperties != nil { + l = m.TlsProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartTime != nil { + l = (*timestamppb.Timestamp)(m.StartTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstDownstreamTxByte != nil { + l 
= (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastDownstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRemoteAddress != nil { + if size, ok := interface{}(m.UpstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamLocalAddress != nil { + if size, ok := interface{}(m.UpstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseFlags != nil { + l = m.ResponseFlags.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamDirectRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamDirectRemoteAddress) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterStateObjects) > 0 { + for k, v := range m.FilterStateObjects { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.CustomTags) > 0 { + for k, v := range m.CustomTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Duration != nil { + l = (*durationpb.Duration)(m.Duration).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRequestAttemptCount != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamRequestAttemptCount)) + } + l = len(m.ConnectionTerminationDetails) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StreamId) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntermediateLogEntry { + n += 3 + } + l = len(m.DownstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesSent)) + } + if m.DownstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesReceived)) + } + if m.UpstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesSent)) + } + if m.UpstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesReceived)) + } + if m.AccessLogType != 0 { + n += 2 + 
protohelpers.SizeOfVarint(uint64(m.AccessLogType)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags_Unauthorized) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Reason != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Reason)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedLocalHealthcheck { + n += 2 + } + if m.NoHealthyUpstream { + n += 2 + } + if m.UpstreamRequestTimeout { + n += 2 + } + if m.LocalReset { + n += 2 + } + if m.UpstreamRemoteReset { + n += 2 + } + if m.UpstreamConnectionFailure { + n += 2 + } + if m.UpstreamConnectionTermination { + n += 2 + } + if m.UpstreamOverflow { + n += 2 + } + if m.NoRouteFound { + n += 2 + } + if m.DelayInjected { + n += 2 + } + if m.FaultInjected { + n += 2 + } + if m.RateLimited { + n += 2 + } + if m.UnauthorizedDetails != nil { + l = m.UnauthorizedDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitServiceError { + n += 2 + } + if m.DownstreamConnectionTermination { + n += 2 + } + if m.UpstreamRetryLimitExceeded { + n += 3 + } + if m.StreamIdleTimeout { + n += 3 + } + if m.InvalidEnvoyRequestHeaders { + n += 3 + } + if m.DownstreamProtocolError { + n += 3 + } + if m.UpstreamMaxStreamDurationReached { + n += 3 + } + if m.ResponseFromCacheFilter { + n += 3 + } + if m.NoFilterConfigFound { + n += 3 + } + if m.DurationTimeout { + n += 3 + } + if m.UpstreamProtocolError { + n += 3 + } + if m.NoClusterFound { + n += 3 + } + if m.OverloadManager { + n += 3 + } + if m.DnsResolutionFailure { + n += 3 + } + if m.DownstreamRemoteReset { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.San.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SubjectAltName) > 0 { + for _, e := range m.SubjectAltName { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Issuer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsVersion)) + } + if m.TlsCipherSuite != nil { + l = (*wrapperspb.UInt32Value)(m.TlsCipherSuite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSniHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalCertificateProperties != nil { + l = m.LocalCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PeerCertificateProperties != nil { + l 
= m.PeerCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSessionId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Ja3Fingerprint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPRequestProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestMethod)) + } + l = len(m.Scheme) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != nil { + l = (*wrapperspb.UInt32Value)(m.Port).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgent) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Referer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ForwardedFor) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OriginalPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestHeadersBytes)) + } + if m.RequestBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestBodyBytes)) + } + if len(m.RequestHeaders) > 0 { + for k, v := range m.RequestHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.UpstreamHeaderBytesSent != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesSent)) + } + if m.DownstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesReceived)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPResponseProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseCode != nil { + l = (*wrapperspb.UInt32Value)(m.ResponseCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseHeadersBytes)) + } + if m.ResponseBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseBodyBytes)) + } + if len(m.ResponseHeaders) > 0 { + for k, v := range m.ResponseHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.ResponseTrailers) > 0 { + for k, v := range m.ResponseTrailers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseCodeDetails) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesReceived)) + } + if m.DownstreamHeaderBytesSent != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesSent)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go new file mode 100644 index 000000000..ed75102d4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the aggregate cluster. See the :ref:`architecture overview +// ` for more information. +// [#extension: envoy.clusters.aggregate] +type ClusterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they + // appear in this list. + Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` +} + +func (x *ClusterConfig) Reset() { + *x = ClusterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterConfig) ProtoMessage() {} + +func (x *ClusterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterConfig.ProtoReflect.Descriptor instead. 
+func (*ClusterConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterConfig) GetClusters() []string { + if x != nil { + return x.Clusters + } + return nil +} + +var File_envoy_extensions_clusters_aggregate_v3_cluster_proto protoreflect.FileDescriptor + +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x72, 0x0a, 0x0d, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x08, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xa9, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x34, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescOnce sync.Once + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData = 
file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc +) + +func file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescGZIP() []byte { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescOnce.Do(func() { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData) + }) + return file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData +} + +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes = []interface{}{ + (*ClusterConfig)(nil), // 0: envoy.extensions.clusters.aggregate.v3.ClusterConfig +} +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_clusters_aggregate_v3_cluster_proto_init() } +func file_envoy_extensions_clusters_aggregate_v3_cluster_proto_init() { + if File_envoy_extensions_clusters_aggregate_v3_cluster_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes, + DependencyIndexes: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs, + MessageInfos: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes, + }.Build() + File_envoy_extensions_clusters_aggregate_v3_cluster_proto = out.File + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc = nil + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes = nil + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go new file mode 100644 index 000000000..44fb2c71f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go @@ -0,0 +1,148 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterConfigMultiError, or +// nil if none found. +func (m *ClusterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetClusters()) < 1 { + err := ClusterConfigValidationError{ + field: "Clusters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ClusterConfigMultiError(errors) + } + + return nil +} + +// ClusterConfigMultiError is an error wrapping multiple validation errors +// returned by ClusterConfig.ValidateAll() if the designated constraints +// aren't met. +type ClusterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterConfigMultiError) AllErrors() []error { return m } + +// ClusterConfigValidationError is the validation error returned by +// ClusterConfig.Validate if the designated constraints aren't met. +type ClusterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClusterConfigValidationError) ErrorName() string { return "ClusterConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go new file mode 100644 index 000000000..a3f22bf13 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go new file mode 100644 index 000000000..13e47ea83 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go @@ -0,0 +1,622 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FaultDelay_FaultDelayType int32 + +const ( + // Unused and deprecated. + FaultDelay_FIXED FaultDelay_FaultDelayType = 0 +) + +// Enum value maps for FaultDelay_FaultDelayType. +var ( + FaultDelay_FaultDelayType_name = map[int32]string{ + 0: "FIXED", + } + FaultDelay_FaultDelayType_value = map[string]int32{ + "FIXED": 0, + } +) + +func (x FaultDelay_FaultDelayType) Enum() *FaultDelay_FaultDelayType { + p := new(FaultDelay_FaultDelayType) + *p = x + return p +} + +func (x FaultDelay_FaultDelayType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FaultDelay_FaultDelayType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes[0].Descriptor() +} + +func (FaultDelay_FaultDelayType) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes[0] +} + +func (x FaultDelay_FaultDelayType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FaultDelay_FaultDelayType.Descriptor instead. +func (FaultDelay_FaultDelayType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +// Delay specification is used to inject latency into the +// HTTP/Mongo operation. +// [#next-free-field: 6] +type FaultDelay struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to FaultDelaySecifier: + // + // *FaultDelay_FixedDelay + // *FaultDelay_HeaderDelay_ + FaultDelaySecifier isFaultDelay_FaultDelaySecifier `protobuf_oneof:"fault_delay_secifier"` + // The percentage of operations/connections/requests on which the delay will be injected. + Percentage *v3.FractionalPercent `protobuf:"bytes,4,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultDelay) Reset() { + *x = FaultDelay{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultDelay) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultDelay) ProtoMessage() {} + +func (x *FaultDelay) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultDelay.ProtoReflect.Descriptor instead. 
+func (*FaultDelay) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0} +} + +func (m *FaultDelay) GetFaultDelaySecifier() isFaultDelay_FaultDelaySecifier { + if m != nil { + return m.FaultDelaySecifier + } + return nil +} + +func (x *FaultDelay) GetFixedDelay() *durationpb.Duration { + if x, ok := x.GetFaultDelaySecifier().(*FaultDelay_FixedDelay); ok { + return x.FixedDelay + } + return nil +} + +func (x *FaultDelay) GetHeaderDelay() *FaultDelay_HeaderDelay { + if x, ok := x.GetFaultDelaySecifier().(*FaultDelay_HeaderDelay_); ok { + return x.HeaderDelay + } + return nil +} + +func (x *FaultDelay) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultDelay_FaultDelaySecifier interface { + isFaultDelay_FaultDelaySecifier() +} + +type FaultDelay_FixedDelay struct { + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified + // delay will be injected before a new request/operation. + // This is required if type is FIXED. + FixedDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=fixed_delay,json=fixedDelay,proto3,oneof"` +} + +type FaultDelay_HeaderDelay_ struct { + // Fault delays are controlled via an HTTP header (if applicable). + HeaderDelay *FaultDelay_HeaderDelay `protobuf:"bytes,5,opt,name=header_delay,json=headerDelay,proto3,oneof"` +} + +func (*FaultDelay_FixedDelay) isFaultDelay_FaultDelaySecifier() {} + +func (*FaultDelay_HeaderDelay_) isFaultDelay_FaultDelaySecifier() {} + +// Describes a rate limit to be applied. +type FaultRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LimitType: + // + // *FaultRateLimit_FixedLimit_ + // *FaultRateLimit_HeaderLimit_ + LimitType isFaultRateLimit_LimitType `protobuf_oneof:"limit_type"` + // The percentage of operations/connections/requests on which the rate limit will be injected. + Percentage *v3.FractionalPercent `protobuf:"bytes,2,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultRateLimit) Reset() { + *x = FaultRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit) ProtoMessage() {} + +func (x *FaultRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit.ProtoReflect.Descriptor instead. 
+func (*FaultRateLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1} +} + +func (m *FaultRateLimit) GetLimitType() isFaultRateLimit_LimitType { + if m != nil { + return m.LimitType + } + return nil +} + +func (x *FaultRateLimit) GetFixedLimit() *FaultRateLimit_FixedLimit { + if x, ok := x.GetLimitType().(*FaultRateLimit_FixedLimit_); ok { + return x.FixedLimit + } + return nil +} + +func (x *FaultRateLimit) GetHeaderLimit() *FaultRateLimit_HeaderLimit { + if x, ok := x.GetLimitType().(*FaultRateLimit_HeaderLimit_); ok { + return x.HeaderLimit + } + return nil +} + +func (x *FaultRateLimit) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultRateLimit_LimitType interface { + isFaultRateLimit_LimitType() +} + +type FaultRateLimit_FixedLimit_ struct { + // A fixed rate limit. + FixedLimit *FaultRateLimit_FixedLimit `protobuf:"bytes,1,opt,name=fixed_limit,json=fixedLimit,proto3,oneof"` +} + +type FaultRateLimit_HeaderLimit_ struct { + // Rate limits are controlled via an HTTP header (if applicable). + HeaderLimit *FaultRateLimit_HeaderLimit `protobuf:"bytes,3,opt,name=header_limit,json=headerLimit,proto3,oneof"` +} + +func (*FaultRateLimit_FixedLimit_) isFaultRateLimit_LimitType() {} + +func (*FaultRateLimit_HeaderLimit_) isFaultRateLimit_LimitType() {} + +// Fault delays are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultDelay_HeaderDelay struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultDelay_HeaderDelay) Reset() { + *x = FaultDelay_HeaderDelay{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultDelay_HeaderDelay) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultDelay_HeaderDelay) ProtoMessage() {} + +func (x *FaultDelay_HeaderDelay) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultDelay_HeaderDelay.ProtoReflect.Descriptor instead. +func (*FaultDelay_HeaderDelay) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +// Describes a fixed/constant rate limit. +type FaultRateLimit_FixedLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The limit supplied in KiB/s. 
+ LimitKbps uint64 `protobuf:"varint,1,opt,name=limit_kbps,json=limitKbps,proto3" json:"limit_kbps,omitempty"` +} + +func (x *FaultRateLimit_FixedLimit) Reset() { + *x = FaultRateLimit_FixedLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit_FixedLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit_FixedLimit) ProtoMessage() {} + +func (x *FaultRateLimit_FixedLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit_FixedLimit.ProtoReflect.Descriptor instead. +func (*FaultRateLimit_FixedLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *FaultRateLimit_FixedLimit) GetLimitKbps() uint64 { + if x != nil { + return x.LimitKbps + } + return 0 +} + +// Rate limits are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultRateLimit_HeaderLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultRateLimit_HeaderLimit) Reset() { + *x = FaultRateLimit_HeaderLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit_HeaderLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit_HeaderLimit) ProtoMessage() {} + +func (x *FaultRateLimit_HeaderLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit_HeaderLimit.ProtoReflect.Descriptor instead. 
+func (*FaultRateLimit_HeaderLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1, 1} +} + +var File_envoy_extensions_filters_common_fault_v3_fault_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, + 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, + 0x00, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x78, 0x65, 0x64, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, + 0x65, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0x49, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 
0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, + 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x65, + 0x6c, 0x61, 0x79, 0x22, 0x1b, 0x0a, 0x0e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x00, + 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x42, 0x1b, 0x0a, 0x14, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, + 0x73, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, + 0xb0, 0x04, 0x0a, 0x0e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x66, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x0a, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x69, 0x0a, 0x0c, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0x73, 0x0a, 0x0a, 0x46, 0x69, 0x78, 0x65, 0x64, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6b, + 0x62, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x28, 0x01, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x4b, 0x62, 0x70, 0x73, 0x3a, 0x3d, 0x9a, + 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, + 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 
0x74, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x4d, 0x0a, 0x0b, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, + 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x32, 0x9a, 0xc5, 0x88, + 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, + 0x11, 0x0a, 0x0a, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0xa7, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x36, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData = file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc +) + +func file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData) + }) + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData +} + +var file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes = []interface{}{ + (FaultDelay_FaultDelayType)(0), // 0: envoy.extensions.filters.common.fault.v3.FaultDelay.FaultDelayType + (*FaultDelay)(nil), // 1: envoy.extensions.filters.common.fault.v3.FaultDelay + (*FaultRateLimit)(nil), // 2: envoy.extensions.filters.common.fault.v3.FaultRateLimit + (*FaultDelay_HeaderDelay)(nil), // 3: envoy.extensions.filters.common.fault.v3.FaultDelay.HeaderDelay + (*FaultRateLimit_FixedLimit)(nil), // 4: envoy.extensions.filters.common.fault.v3.FaultRateLimit.FixedLimit + 
(*FaultRateLimit_HeaderLimit)(nil), // 5: envoy.extensions.filters.common.fault.v3.FaultRateLimit.HeaderLimit + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*v3.FractionalPercent)(nil), // 7: envoy.type.v3.FractionalPercent +} +var file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs = []int32{ + 6, // 0: envoy.extensions.filters.common.fault.v3.FaultDelay.fixed_delay:type_name -> google.protobuf.Duration + 3, // 1: envoy.extensions.filters.common.fault.v3.FaultDelay.header_delay:type_name -> envoy.extensions.filters.common.fault.v3.FaultDelay.HeaderDelay + 7, // 2: envoy.extensions.filters.common.fault.v3.FaultDelay.percentage:type_name -> envoy.type.v3.FractionalPercent + 4, // 3: envoy.extensions.filters.common.fault.v3.FaultRateLimit.fixed_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit.FixedLimit + 5, // 4: envoy.extensions.filters.common.fault.v3.FaultRateLimit.header_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit.HeaderLimit + 7, // 5: envoy.extensions.filters.common.fault.v3.FaultRateLimit.percentage:type_name -> envoy.type.v3.FractionalPercent + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_common_fault_v3_fault_proto_init() } +func file_envoy_extensions_filters_common_fault_v3_fault_proto_init() { + if File_envoy_extensions_filters_common_fault_v3_fault_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultDelay); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultDelay_HeaderDelay); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit_FixedLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit_HeaderLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FaultDelay_FixedDelay)(nil), + (*FaultDelay_HeaderDelay_)(nil), + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*FaultRateLimit_FixedLimit_)(nil), + (*FaultRateLimit_HeaderLimit_)(nil), + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs, + EnumInfos: file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes, + MessageInfos: file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_common_fault_v3_fault_proto = out.File + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc = nil + file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes = nil + file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go new file mode 100644 index 000000000..7387e6d02 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go @@ -0,0 +1,812 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FaultDelay with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultDelay) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultDelay with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultDelayMultiError, or +// nil if none found. 
+func (m *FaultDelay) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultDelay) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofFaultDelaySecifierPresent := false + switch v := m.FaultDelaySecifier.(type) { + case *FaultDelay_FixedDelay: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true + + if d := m.GetFixedDelay(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = FaultDelayValidationError{ + field: "FixedDelay", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := FaultDelayValidationError{ + field: "FixedDelay", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + case *FaultDelay_HeaderDelay_: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFaultDelaySecifierPresent { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultDelayMultiError(errors) + } + + return nil +} + +// FaultDelayMultiError is an error wrapping multiple validation errors +// returned by FaultDelay.ValidateAll() if the designated constraints aren't met. +type FaultDelayMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FaultDelayMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultDelayMultiError) AllErrors() []error { return m } + +// FaultDelayValidationError is the validation error returned by +// FaultDelay.Validate if the designated constraints aren't met. +type FaultDelayValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultDelayValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultDelayValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultDelayValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultDelayValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultDelayValidationError) ErrorName() string { return "FaultDelayValidationError" } + +// Error satisfies the builtin error interface +func (e FaultDelayValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultDelay.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultDelayValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultDelayValidationError{} + +// Validate checks the field values on FaultRateLimit with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultRateLimitMultiError, +// or nil if none found. 
+func (m *FaultRateLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofLimitTypePresent := false + switch v := m.LimitType.(type) { + case *FaultRateLimit_FixedLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true + + if all { + switch v := interface{}(m.GetFixedLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFixedLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *FaultRateLimit_HeaderLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLimitTypePresent { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultRateLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimitMultiError is an error 
wrapping multiple validation errors +// returned by FaultRateLimit.ValidateAll() if the designated constraints +// aren't met. +type FaultRateLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimitValidationError is the validation error returned by +// FaultRateLimit.Validate if the designated constraints aren't met. +type FaultRateLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultRateLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimitValidationError) ErrorName() string { return "FaultRateLimitValidationError" } + +// Error satisfies the builtin error interface +func (e FaultRateLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimitValidationError{} + +// Validate checks the field values on FaultDelay_HeaderDelay with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultDelay_HeaderDelay) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultDelay_HeaderDelay with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultDelay_HeaderDelayMultiError, or nil if none found. +func (m *FaultDelay_HeaderDelay) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultDelay_HeaderDelay) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultDelay_HeaderDelayMultiError(errors) + } + + return nil +} + +// FaultDelay_HeaderDelayMultiError is an error wrapping multiple validation +// errors returned by FaultDelay_HeaderDelay.ValidateAll() if the designated +// constraints aren't met. +type FaultDelay_HeaderDelayMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultDelay_HeaderDelayMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m FaultDelay_HeaderDelayMultiError) AllErrors() []error { return m } + +// FaultDelay_HeaderDelayValidationError is the validation error returned by +// FaultDelay_HeaderDelay.Validate if the designated constraints aren't met. +type FaultDelay_HeaderDelayValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultDelay_HeaderDelayValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultDelay_HeaderDelayValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultDelay_HeaderDelayValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultDelay_HeaderDelayValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultDelay_HeaderDelayValidationError) ErrorName() string { + return "FaultDelay_HeaderDelayValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultDelay_HeaderDelayValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultDelay_HeaderDelay.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultDelay_HeaderDelayValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultDelay_HeaderDelayValidationError{} + +// Validate checks the field values on FaultRateLimit_FixedLimit with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit_FixedLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit_FixedLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultRateLimit_FixedLimitMultiError, or nil if none found. +func (m *FaultRateLimit_FixedLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit_FixedLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetLimitKbps() < 1 { + err := FaultRateLimit_FixedLimitValidationError{ + field: "LimitKbps", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultRateLimit_FixedLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimit_FixedLimitMultiError is an error wrapping multiple validation +// errors returned by FaultRateLimit_FixedLimit.ValidateAll() if the +// designated constraints aren't met. +type FaultRateLimit_FixedLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimit_FixedLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimit_FixedLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimit_FixedLimitValidationError is the validation error returned by +// FaultRateLimit_FixedLimit.Validate if the designated constraints aren't met. 
+type FaultRateLimit_FixedLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimit_FixedLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimit_FixedLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultRateLimit_FixedLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimit_FixedLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimit_FixedLimitValidationError) ErrorName() string { + return "FaultRateLimit_FixedLimitValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultRateLimit_FixedLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit_FixedLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimit_FixedLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimit_FixedLimitValidationError{} + +// Validate checks the field values on FaultRateLimit_HeaderLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit_HeaderLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit_HeaderLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultRateLimit_HeaderLimitMultiError, or nil if none found. +func (m *FaultRateLimit_HeaderLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit_HeaderLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultRateLimit_HeaderLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimit_HeaderLimitMultiError is an error wrapping multiple +// validation errors returned by FaultRateLimit_HeaderLimit.ValidateAll() if +// the designated constraints aren't met. +type FaultRateLimit_HeaderLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimit_HeaderLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimit_HeaderLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimit_HeaderLimitValidationError is the validation error returned +// by FaultRateLimit_HeaderLimit.Validate if the designated constraints aren't met. +type FaultRateLimit_HeaderLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimit_HeaderLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimit_HeaderLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e FaultRateLimit_HeaderLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimit_HeaderLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimit_HeaderLimitValidationError) ErrorName() string { + return "FaultRateLimit_HeaderLimitValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultRateLimit_HeaderLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit_HeaderLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimit_HeaderLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimit_HeaderLimitValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go new file mode 100644 index 000000000..4a462b40a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,491 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultDelay_HeaderDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay_HeaderDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_HeaderDelay_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_FixedDelay); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay_FixedDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_FixedDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedDelay != nil { + size, err := (*durationpb.Duration)(m.FixedDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderDelay != nil { + size, err := m.HeaderDelay.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + 
} else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_FixedLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_FixedLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_FixedLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LimitKbps != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LimitKbps)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LimitType.(*FaultRateLimit_HeaderLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.LimitType.(*FaultRateLimit_FixedLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_FixedLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*FaultRateLimit_FixedLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedLimit != nil { + size, err := m.FixedLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_HeaderLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderLimit != nil { + size, err := m.HeaderLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FaultDelaySecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay_FixedDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedDelay != nil { + l = (*durationpb.Duration)(m.FixedDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultDelay_HeaderDelay_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderDelay != nil { + l = m.HeaderDelay.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_FixedLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LimitKbps != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LimitKbps)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_HeaderLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LimitType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_FixedLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedLimit != nil { + l = m.FixedLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_HeaderLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderLimit != nil { + l = m.HeaderLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff 
--git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go new file mode 100644 index 000000000..cea58cea9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go @@ -0,0 +1,655 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 6] +type FaultAbort struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ErrorType: + // + // *FaultAbort_HttpStatus + // *FaultAbort_GrpcStatus + // *FaultAbort_HeaderAbort_ + ErrorType isFaultAbort_ErrorType `protobuf_oneof:"error_type"` + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + Percentage *v3.FractionalPercent `protobuf:"bytes,3,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultAbort) Reset() { + *x = FaultAbort{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultAbort) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultAbort) ProtoMessage() {} + +func (x *FaultAbort) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultAbort.ProtoReflect.Descriptor instead. 
+func (*FaultAbort) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{0} +} + +func (m *FaultAbort) GetErrorType() isFaultAbort_ErrorType { + if m != nil { + return m.ErrorType + } + return nil +} + +func (x *FaultAbort) GetHttpStatus() uint32 { + if x, ok := x.GetErrorType().(*FaultAbort_HttpStatus); ok { + return x.HttpStatus + } + return 0 +} + +func (x *FaultAbort) GetGrpcStatus() uint32 { + if x, ok := x.GetErrorType().(*FaultAbort_GrpcStatus); ok { + return x.GrpcStatus + } + return 0 +} + +func (x *FaultAbort) GetHeaderAbort() *FaultAbort_HeaderAbort { + if x, ok := x.GetErrorType().(*FaultAbort_HeaderAbort_); ok { + return x.HeaderAbort + } + return nil +} + +func (x *FaultAbort) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultAbort_ErrorType interface { + isFaultAbort_ErrorType() +} + +type FaultAbort_HttpStatus struct { + // HTTP status code to use to abort the HTTP request. + HttpStatus uint32 `protobuf:"varint,2,opt,name=http_status,json=httpStatus,proto3,oneof"` +} + +type FaultAbort_GrpcStatus struct { + // gRPC status code to use to abort the gRPC request. + GrpcStatus uint32 `protobuf:"varint,5,opt,name=grpc_status,json=grpcStatus,proto3,oneof"` +} + +type FaultAbort_HeaderAbort_ struct { + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort *FaultAbort_HeaderAbort `protobuf:"bytes,4,opt,name=header_abort,json=headerAbort,proto3,oneof"` +} + +func (*FaultAbort_HttpStatus) isFaultAbort_ErrorType() {} + +func (*FaultAbort_GrpcStatus) isFaultAbort_ErrorType() {} + +func (*FaultAbort_HeaderAbort_) isFaultAbort_ErrorType() {} + +// [#next-free-field: 17] +type HTTPFault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If specified, the filter will inject delays based on the values in the + // object. + Delay *v31.FaultDelay `protobuf:"bytes,1,opt,name=delay,proto3" json:"delay,omitempty"` + // If specified, the filter will abort requests based on the values in + // the object. At least “abort“ or “delay“ must be specified. + Abort *FaultAbort `protobuf:"bytes,2,opt,name=abort,proto3" json:"abort,omitempty"` + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + UpstreamCluster string `protobuf:"bytes,3,opt,name=upstream_cluster,json=upstreamCluster,proto3" json:"upstream_cluster,omitempty"` + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the “value“ field is not in the config). + Headers []*v32.HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. 
+ // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + DownstreamNodes []string `protobuf:"bytes,5,rep,name=downstream_nodes,json=downstreamNodes,proto3" json:"downstream_nodes,omitempty"` + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // “runtime “ and any faults that are not injected + // due to overflow will be indicated via the “faults_overflow + // “ stat. + // + // .. attention:: + // + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + MaxActiveFaults *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_active_faults,json=maxActiveFaults,proto3" json:"max_active_faults,omitempty"` + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + ResponseRateLimit *v31.FaultRateLimit `protobuf:"bytes,7,opt,name=response_rate_limit,json=responseRateLimit,proto3" json:"response_rate_limit,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + DelayPercentRuntime string `protobuf:"bytes,8,opt,name=delay_percent_runtime,json=delayPercentRuntime,proto3" json:"delay_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + AbortPercentRuntime string `protobuf:"bytes,9,opt,name=abort_percent_runtime,json=abortPercentRuntime,proto3" json:"abort_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + DelayDurationRuntime string `protobuf:"bytes,10,opt,name=delay_duration_runtime,json=delayDurationRuntime,proto3" json:"delay_duration_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + AbortHttpStatusRuntime string `protobuf:"bytes,11,opt,name=abort_http_status_runtime,json=abortHttpStatusRuntime,proto3" json:"abort_http_status_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + MaxActiveFaultsRuntime string `protobuf:"bytes,12,opt,name=max_active_faults_runtime,json=maxActiveFaultsRuntime,proto3" json:"max_active_faults_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + ResponseRateLimitPercentRuntime string `protobuf:"bytes,13,opt,name=response_rate_limit_percent_runtime,json=responseRateLimitPercentRuntime,proto3" json:"response_rate_limit_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. 
The default is: fault.http.abort.grpc_status + AbortGrpcStatusRuntime string `protobuf:"bytes,14,opt,name=abort_grpc_status_runtime,json=abortGrpcStatusRuntime,proto3" json:"abort_grpc_status_runtime,omitempty"` + // To control whether stats storage is allocated dynamically for each downstream server. + // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. + // If set to false, dynamic stats storage will be allocated for the downstream cluster name. + // Default value is false. + DisableDownstreamClusterStats bool `protobuf:"varint,15,opt,name=disable_downstream_cluster_stats,json=disableDownstreamClusterStats,proto3" json:"disable_downstream_cluster_stats,omitempty"` + // When an abort or delay fault is executed, the metadata struct provided here will be added to the + // request's dynamic metadata under the namespace corresponding to the name of the fault filter. + // This data can be logged as part of Access Logs using the :ref:`command operator + // ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of + // the fault filter. + FilterMetadata *structpb.Struct `protobuf:"bytes,16,opt,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty"` +} + +func (x *HTTPFault) Reset() { + *x = HTTPFault{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPFault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPFault) ProtoMessage() {} + +func (x *HTTPFault) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPFault.ProtoReflect.Descriptor instead. 
+func (*HTTPFault) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPFault) GetDelay() *v31.FaultDelay { + if x != nil { + return x.Delay + } + return nil +} + +func (x *HTTPFault) GetAbort() *FaultAbort { + if x != nil { + return x.Abort + } + return nil +} + +func (x *HTTPFault) GetUpstreamCluster() string { + if x != nil { + return x.UpstreamCluster + } + return "" +} + +func (x *HTTPFault) GetHeaders() []*v32.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HTTPFault) GetDownstreamNodes() []string { + if x != nil { + return x.DownstreamNodes + } + return nil +} + +func (x *HTTPFault) GetMaxActiveFaults() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxActiveFaults + } + return nil +} + +func (x *HTTPFault) GetResponseRateLimit() *v31.FaultRateLimit { + if x != nil { + return x.ResponseRateLimit + } + return nil +} + +func (x *HTTPFault) GetDelayPercentRuntime() string { + if x != nil { + return x.DelayPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortPercentRuntime() string { + if x != nil { + return x.AbortPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetDelayDurationRuntime() string { + if x != nil { + return x.DelayDurationRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortHttpStatusRuntime() string { + if x != nil { + return x.AbortHttpStatusRuntime + } + return "" +} + +func (x *HTTPFault) GetMaxActiveFaultsRuntime() string { + if x != nil { + return x.MaxActiveFaultsRuntime + } + return "" +} + +func (x *HTTPFault) GetResponseRateLimitPercentRuntime() string { + if x != nil { + return x.ResponseRateLimitPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortGrpcStatusRuntime() string { + if x != nil { + return x.AbortGrpcStatusRuntime + } + return "" +} + +func (x *HTTPFault) GetDisableDownstreamClusterStats() bool { + if x != nil { + return x.DisableDownstreamClusterStats + } + return false +} + +func (x *HTTPFault) GetFilterMetadata() *structpb.Struct { + if x != nil { + return x.FilterMetadata + } + return nil +} + +// Fault aborts are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultAbort_HeaderAbort struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultAbort_HeaderAbort) Reset() { + *x = FaultAbort_HeaderAbort{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultAbort_HeaderAbort) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultAbort_HeaderAbort) ProtoMessage() {} + +func (x *FaultAbort_HeaderAbort) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultAbort_HeaderAbort.ProtoReflect.Descriptor instead. 
+func (*FaultAbort_HeaderAbort) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +var File_envoy_extensions_filters_http_fault_v3_fault_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, + 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, + 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x63, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 
0x2e, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, + 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x48, 0x00, + 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x40, 0x0a, + 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, + 0x4e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, 0x3f, + 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, + 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, + 0x62, 0x6f, 0x72, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xc7, 0x08, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x4a, 0x0a, 0x05, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x61, 0x62, 0x6f, 0x72, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x61, 0x62, 0x6f, 0x72, + 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x68, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x61, + 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x62, 0x6f, 0x72, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x61, 0x62, 0x6f, + 0x72, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x61, 0x62, + 0x6f, 0x72, 0x74, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x4c, 0x0a, 0x23, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x19, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x61, 0x62, 0x6f, 0x72, 
0x74, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, + 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x42, 0xa3, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x34, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x55, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData = file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc +) + +func file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData +} + +var file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = []interface{}{ + (*FaultAbort)(nil), // 0: envoy.extensions.filters.http.fault.v3.FaultAbort + (*HTTPFault)(nil), // 1: envoy.extensions.filters.http.fault.v3.HTTPFault + (*FaultAbort_HeaderAbort)(nil), // 2: envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort + (*v3.FractionalPercent)(nil), // 3: envoy.type.v3.FractionalPercent + (*v31.FaultDelay)(nil), // 4: 
envoy.extensions.filters.common.fault.v3.FaultDelay + (*v32.HeaderMatcher)(nil), // 5: envoy.config.route.v3.HeaderMatcher + (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*v31.FaultRateLimit)(nil), // 7: envoy.extensions.filters.common.fault.v3.FaultRateLimit + (*structpb.Struct)(nil), // 8: google.protobuf.Struct +} +var file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.fault.v3.FaultAbort.header_abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort + 3, // 1: envoy.extensions.filters.http.fault.v3.FaultAbort.percentage:type_name -> envoy.type.v3.FractionalPercent + 4, // 2: envoy.extensions.filters.http.fault.v3.HTTPFault.delay:type_name -> envoy.extensions.filters.common.fault.v3.FaultDelay + 0, // 3: envoy.extensions.filters.http.fault.v3.HTTPFault.abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort + 5, // 4: envoy.extensions.filters.http.fault.v3.HTTPFault.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 6, // 5: envoy.extensions.filters.http.fault.v3.HTTPFault.max_active_faults:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.extensions.filters.http.fault.v3.HTTPFault.response_rate_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit + 8, // 7: envoy.extensions.filters.http.fault.v3.HTTPFault.filter_metadata:type_name -> google.protobuf.Struct + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_fault_v3_fault_proto_init() } +func file_envoy_extensions_filters_http_fault_v3_fault_proto_init() { + if File_envoy_extensions_filters_http_fault_v3_fault_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultAbort); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPFault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultAbort_HeaderAbort); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FaultAbort_HttpStatus)(nil), + (*FaultAbort_GrpcStatus)(nil), + (*FaultAbort_HeaderAbort_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_fault_v3_fault_proto = out.File + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc = nil + file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = nil + file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go new file mode 100644 index 000000000..f3cc33072 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go @@ -0,0 +1,658 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FaultAbort with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultAbort) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultAbort with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultAbortMultiError, or +// nil if none found. 
+func (m *FaultAbort) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultAbort) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofErrorTypePresent := false + switch v := m.ErrorType.(type) { + case *FaultAbort_HttpStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + + if val := m.GetHttpStatus(); val < 200 || val >= 600 { + err := FaultAbortValidationError{ + field: "HttpStatus", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *FaultAbort_GrpcStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + // no validation rules for GrpcStatus + case *FaultAbort_HeaderAbort_: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderAbort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderAbort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofErrorTypePresent { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultAbortMultiError(errors) + } + + return nil +} + +// FaultAbortMultiError is an error wrapping multiple validation errors +// returned by FaultAbort.ValidateAll() if the designated constraints aren't met. +type FaultAbortMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FaultAbortMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultAbortMultiError) AllErrors() []error { return m } + +// FaultAbortValidationError is the validation error returned by +// FaultAbort.Validate if the designated constraints aren't met. +type FaultAbortValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultAbortValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultAbortValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultAbortValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultAbortValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultAbortValidationError) ErrorName() string { return "FaultAbortValidationError" } + +// Error satisfies the builtin error interface +func (e FaultAbortValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultAbort.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultAbortValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultAbortValidationError{} + +// Validate checks the field values on HTTPFault with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HTTPFault) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPFault with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HTTPFaultMultiError, or nil +// if none found. 
+func (m *HTTPFault) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPFault) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAbort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAbort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamCluster + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxActiveFaults()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxActiveFaults()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetResponseRateLimit()).(type) { + case interface{ ValidateAll() error }: + 
if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseRateLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DelayPercentRuntime + + // no validation rules for AbortPercentRuntime + + // no validation rules for DelayDurationRuntime + + // no validation rules for AbortHttpStatusRuntime + + // no validation rules for MaxActiveFaultsRuntime + + // no validation rules for ResponseRateLimitPercentRuntime + + // no validation rules for AbortGrpcStatusRuntime + + // no validation rules for DisableDownstreamClusterStats + + if all { + switch v := interface{}(m.GetFilterMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HTTPFaultMultiError(errors) + } + + return nil +} + +// HTTPFaultMultiError is an error wrapping multiple validation errors returned +// by HTTPFault.ValidateAll() if the designated constraints aren't met. +type HTTPFaultMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPFaultMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPFaultMultiError) AllErrors() []error { return m } + +// HTTPFaultValidationError is the validation error returned by +// HTTPFault.Validate if the designated constraints aren't met. +type HTTPFaultValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPFaultValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPFaultValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPFaultValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPFaultValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HTTPFaultValidationError) ErrorName() string { return "HTTPFaultValidationError" } + +// Error satisfies the builtin error interface +func (e HTTPFaultValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPFault.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPFaultValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPFaultValidationError{} + +// Validate checks the field values on FaultAbort_HeaderAbort with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultAbort_HeaderAbort) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultAbort_HeaderAbort with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultAbort_HeaderAbortMultiError, or nil if none found. +func (m *FaultAbort_HeaderAbort) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultAbort_HeaderAbort) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultAbort_HeaderAbortMultiError(errors) + } + + return nil +} + +// FaultAbort_HeaderAbortMultiError is an error wrapping multiple validation +// errors returned by FaultAbort_HeaderAbort.ValidateAll() if the designated +// constraints aren't met. +type FaultAbort_HeaderAbortMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultAbort_HeaderAbortMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultAbort_HeaderAbortMultiError) AllErrors() []error { return m } + +// FaultAbort_HeaderAbortValidationError is the validation error returned by +// FaultAbort_HeaderAbort.Validate if the designated constraints aren't met. +type FaultAbort_HeaderAbortValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultAbort_HeaderAbortValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultAbort_HeaderAbortValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultAbort_HeaderAbortValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultAbort_HeaderAbortValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FaultAbort_HeaderAbortValidationError) ErrorName() string { + return "FaultAbort_HeaderAbortValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultAbort_HeaderAbortValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultAbort_HeaderAbort.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultAbort_HeaderAbortValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultAbort_HeaderAbortValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go new file mode 100644 index 000000000..8baeafe14 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultAbort_HeaderAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort_HeaderAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ErrorType.(*FaultAbort_GrpcStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ErrorType.(*FaultAbort_HeaderAbort_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ErrorType.(*FaultAbort_HttpStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HttpStatus)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *FaultAbort_HeaderAbort_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderAbort != nil { + size, err := m.HeaderAbort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return 
len(dAtA) - i, nil +} +func (m *FaultAbort_GrpcStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_GrpcStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.GrpcStatus)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HTTPFault) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPFault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FilterMetadata != nil { + size, err := (*structpb.Struct)(m.FilterMetadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.DisableDownstreamClusterStats { + i-- + if m.DisableDownstreamClusterStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if len(m.AbortGrpcStatusRuntime) > 0 { + i -= len(m.AbortGrpcStatusRuntime) + copy(dAtA[i:], m.AbortGrpcStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortGrpcStatusRuntime))) + i-- + dAtA[i] = 0x72 + } + if len(m.ResponseRateLimitPercentRuntime) > 0 { + i -= len(m.ResponseRateLimitPercentRuntime) + copy(dAtA[i:], m.ResponseRateLimitPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseRateLimitPercentRuntime))) + i-- + dAtA[i] = 0x6a + } + if len(m.MaxActiveFaultsRuntime) > 0 { + i -= len(m.MaxActiveFaultsRuntime) + copy(dAtA[i:], m.MaxActiveFaultsRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxActiveFaultsRuntime))) + i-- + dAtA[i] = 0x62 + } + if len(m.AbortHttpStatusRuntime) > 0 { + i -= len(m.AbortHttpStatusRuntime) + copy(dAtA[i:], m.AbortHttpStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortHttpStatusRuntime))) + i-- + dAtA[i] = 0x5a + } + if len(m.DelayDurationRuntime) > 0 { + i -= len(m.DelayDurationRuntime) + copy(dAtA[i:], m.DelayDurationRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayDurationRuntime))) + i-- + dAtA[i] = 0x52 + } + if len(m.AbortPercentRuntime) > 0 { + i -= len(m.AbortPercentRuntime) + copy(dAtA[i:], m.AbortPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortPercentRuntime))) + i-- + dAtA[i] = 0x4a + } + if len(m.DelayPercentRuntime) > 0 { + i -= len(m.DelayPercentRuntime) + copy(dAtA[i:], m.DelayPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayPercentRuntime))) + i-- + dAtA[i] = 0x42 + } + if m.ResponseRateLimit != nil { + if vtmsg, ok := interface{}(m.ResponseRateLimit).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseRateLimit) + if err != nil { + return 0, err + } + i -= 
len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.MaxActiveFaults != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxActiveFaults).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.DownstreamNodes) > 0 { + for iNdEx := len(m.DownstreamNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DownstreamNodes[iNdEx]) + copy(dAtA[i:], m.DownstreamNodes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamNodes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x1a + } + if m.Abort != nil { + size, err := m.Abort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Delay != nil { + if vtmsg, ok := interface{}(m.Delay).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Delay) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HeaderAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ErrorType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort_HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.HttpStatus)) + return n +} +func (m *FaultAbort_HeaderAbort_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderAbort != nil { + l = m.HeaderAbort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultAbort_GrpcStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.GrpcStatus)) + return n +} +func (m *HTTPFault) SizeVT() (n int) { + if m == nil { + return 
0 + } + var l int + _ = l + if m.Delay != nil { + if size, ok := interface{}(m.Delay).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Delay) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Abort != nil { + l = m.Abort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DownstreamNodes) > 0 { + for _, s := range m.DownstreamNodes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxActiveFaults != nil { + l = (*wrapperspb.UInt32Value)(m.MaxActiveFaults).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseRateLimit != nil { + if size, ok := interface{}(m.ResponseRateLimit).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ResponseRateLimit) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayDurationRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortHttpStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxActiveFaultsRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseRateLimitPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortGrpcStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableDownstreamClusterStats { + n += 2 + } + if m.FilterMetadata != nil { + l = (*structpb.Struct)(m.FilterMetadata).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go new file mode 100644 index 000000000..bcf5c7d40 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v31 "github.com/cncf/xds/go/xds/type/matcher/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// RBAC filter config. 
+// [#next-free-field: 8] +type RBAC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + // If present and empty, DENY. + // If both rules and matcher are configured, rules will be ignored. + Rules *v3.RBAC `protobuf:"bytes,1,opt,name=rules,proto3" json:"rules,omitempty"` + // If specified, rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // rules. + RulesStatPrefix string `protobuf:"bytes,6,opt,name=rules_stat_prefix,json=rulesStatPrefix,proto3" json:"rules_stat_prefix,omitempty"` + // The match tree to use when resolving RBAC action for incoming requests. Requests do not + // match any matcher will be denied. + // If absent, no enforcing RBAC matcher will be applied. + // If present and empty, deny all requests. + Matcher *v31.Matcher `protobuf:"bytes,4,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + // If both shadow rules and shadow matcher are configured, shadow rules will be ignored. + ShadowRules *v3.RBAC `protobuf:"bytes,2,opt,name=shadow_rules,json=shadowRules,proto3" json:"shadow_rules,omitempty"` + // The match tree to use for emitting stats and logs which can be used for rule testing for + // incoming requests. + // If absent, no shadow matcher will be applied. + ShadowMatcher *v31.Matcher `protobuf:"bytes,5,opt,name=shadow_matcher,json=shadowMatcher,proto3" json:"shadow_matcher,omitempty"` + // If specified, shadow rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // shadow rules. + ShadowRulesStatPrefix string `protobuf:"bytes,3,opt,name=shadow_rules_stat_prefix,json=shadowRulesStatPrefix,proto3" json:"shadow_rules_stat_prefix,omitempty"` + // If track_per_rule_stats is true, counters will be published for each rule and shadow rule. + TrackPerRuleStats bool `protobuf:"varint,7,opt,name=track_per_rule_stats,json=trackPerRuleStats,proto3" json:"track_per_rule_stats,omitempty"` +} + +func (x *RBAC) Reset() { + *x = RBAC{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC) ProtoMessage() {} + +func (x *RBAC) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC.ProtoReflect.Descriptor instead. 
+func (*RBAC) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP(), []int{0} +} + +func (x *RBAC) GetRules() *v3.RBAC { + if x != nil { + return x.Rules + } + return nil +} + +func (x *RBAC) GetRulesStatPrefix() string { + if x != nil { + return x.RulesStatPrefix + } + return "" +} + +func (x *RBAC) GetMatcher() *v31.Matcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *RBAC) GetShadowRules() *v3.RBAC { + if x != nil { + return x.ShadowRules + } + return nil +} + +func (x *RBAC) GetShadowMatcher() *v31.Matcher { + if x != nil { + return x.ShadowMatcher + } + return nil +} + +func (x *RBAC) GetShadowRulesStatPrefix() string { + if x != nil { + return x.ShadowRulesStatPrefix + } + return "" +} + +func (x *RBAC) GetTrackPerRuleStats() bool { + if x != nil { + return x.TrackPerRuleStats + } + return false +} + +type RBACPerRoute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. + Rbac *RBAC `protobuf:"bytes,2,opt,name=rbac,proto3" json:"rbac,omitempty"` +} + +func (x *RBACPerRoute) Reset() { + *x = RBACPerRoute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBACPerRoute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBACPerRoute) ProtoMessage() {} + +func (x *RBACPerRoute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBACPerRoute.ProtoReflect.Descriptor instead. 
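// Editorial note (not part of the vendored file): the RBAC message above is the
// filter-level config for Envoy's HTTP RBAC filter, and RBACPerRoute overrides it
// per route. Below is a minimal, hypothetical usage sketch built only from fields
// visible in this diff; the nested envoy/config/rbac/v3 policy is deliberately left
// nil, which, per the field comments above, means no enforcing policy is applied.
// The stat prefix value is illustrative. proto.Marshal is used here because the
// MarshalVTStrict helpers added elsewhere in this diff are only compiled under the
// vtprotobuf build tag.

package main

import (
	"fmt"

	rbacv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	cfg := &rbacv3.RBAC{
		RulesStatPrefix:   "rbac_enforced_", // illustrative prefix for emitted stats
		TrackPerRuleStats: true,             // per-rule counters, new field in this revision
	}
	perRoute := &rbacv3.RBACPerRoute{Rbac: cfg}

	b, err := proto.Marshal(perRoute)
	fmt.Println(len(b), err)
}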
+func (*RBACPerRoute) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP(), []int{1} +} + +func (x *RBACPerRoute) GetRbac() *RBAC { + if x != nil { + return x.Rbac + } + return nil +} + +var File_envoy_extensions_filters_http_rbac_v3_rbac_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xba, 0x04, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x49, 0x0a, 0x05, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x57, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x1f, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0c, 0x73, 0x68, 0x61, + 0x64, 
0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x42, 0x1e, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x73, 0x68, 0x61, + 0x64, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, + 0x6f, 0x77, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x26, + 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2f, + 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x74, 0x72, + 0x61, 0x63, 0x6b, 0x50, 0x65, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, + 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x22, 0x8b, 0x01, + 0x0a, 0x0c, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x3f, + 0x0a, 0x04, 0x72, 0x62, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, + 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x52, 0x04, 0x72, 0x62, 0x61, 0x63, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x9f, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x33, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData = file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc +) + +func file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData +} + +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes = []interface{}{ + (*RBAC)(nil), // 0: envoy.extensions.filters.http.rbac.v3.RBAC + (*RBACPerRoute)(nil), // 1: envoy.extensions.filters.http.rbac.v3.RBACPerRoute + (*v3.RBAC)(nil), // 2: envoy.config.rbac.v3.RBAC + (*v31.Matcher)(nil), // 3: xds.type.matcher.v3.Matcher +} +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.rbac.v3.RBAC.rules:type_name -> envoy.config.rbac.v3.RBAC + 3, // 1: envoy.extensions.filters.http.rbac.v3.RBAC.matcher:type_name -> xds.type.matcher.v3.Matcher + 2, // 2: envoy.extensions.filters.http.rbac.v3.RBAC.shadow_rules:type_name -> envoy.config.rbac.v3.RBAC + 3, // 3: envoy.extensions.filters.http.rbac.v3.RBAC.shadow_matcher:type_name -> xds.type.matcher.v3.Matcher + 0, // 4: envoy.extensions.filters.http.rbac.v3.RBACPerRoute.rbac:type_name -> envoy.extensions.filters.http.rbac.v3.RBAC + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_rbac_v3_rbac_proto_init() } +func file_envoy_extensions_filters_http_rbac_v3_rbac_proto_init() { + if File_envoy_extensions_filters_http_rbac_v3_rbac_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBACPerRoute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs, + MessageInfos: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_rbac_v3_rbac_proto = out.File + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc = nil + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes = nil + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go new file mode 100644 index 000000000..1d820564b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go @@ -0,0 +1,385 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RBAC with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *RBAC) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RBACMultiError, or nil if none found. 
+func (m *RBAC) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RulesStatPrefix + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ShadowRulesStatPrefix + + // no validation rules for TrackPerRuleStats + + if len(errors) > 0 { + return RBACMultiError(errors) + } + + return nil +} + +// RBACMultiError is an error wrapping multiple validation errors returned by +// 
RBAC.ValidateAll() if the designated constraints aren't met. +type RBACMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACMultiError) AllErrors() []error { return m } + +// RBACValidationError is the validation error returned by RBAC.Validate if the +// designated constraints aren't met. +type RBACValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACValidationError) ErrorName() string { return "RBACValidationError" } + +// Error satisfies the builtin error interface +func (e RBACValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACValidationError{} + +// Validate checks the field values on RBACPerRoute with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RBACPerRoute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBACPerRoute with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RBACPerRouteMultiError, or +// nil if none found. +func (m *RBACPerRoute) ValidateAll() error { + return m.validate(true) +} + +func (m *RBACPerRoute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRbac()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRbac()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RBACPerRouteMultiError(errors) + } + + return nil +} + +// RBACPerRouteMultiError is an error wrapping multiple validation errors +// returned by RBACPerRoute.ValidateAll() if the designated constraints aren't met. 
+type RBACPerRouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACPerRouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACPerRouteMultiError) AllErrors() []error { return m } + +// RBACPerRouteValidationError is the validation error returned by +// RBACPerRoute.Validate if the designated constraints aren't met. +type RBACPerRouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACPerRouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACPerRouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACPerRouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACPerRouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACPerRouteValidationError) ErrorName() string { return "RBACPerRouteValidationError" } + +// Error satisfies the builtin error interface +func (e RBACPerRouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBACPerRoute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACPerRouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACPerRouteValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go new file mode 100644 index 000000000..1e9cd9a06 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go @@ -0,0 +1,283 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrackPerRuleStats { + i-- + if m.TrackPerRuleStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.RulesStatPrefix) > 0 { + i -= len(m.RulesStatPrefix) + copy(dAtA[i:], m.RulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RulesStatPrefix))) + i-- + dAtA[i] = 0x32 + } + if m.ShadowMatcher != nil { + if vtmsg, ok := interface{}(m.ShadowMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ShadowRulesStatPrefix) > 0 { + i -= len(m.ShadowRulesStatPrefix) + copy(dAtA[i:], m.ShadowRulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ShadowRulesStatPrefix))) + i-- + dAtA[i] = 0x1a + } + if m.ShadowRules != nil { + if vtmsg, ok := interface{}(m.ShadowRules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowRules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Rules != nil { + if vtmsg, ok := interface{}(m.Rules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Rules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBACPerRoute) MarshalVTStrict() (dAtA []byte, err error) 
{ + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBACPerRoute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBACPerRoute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Rbac != nil { + size, err := m.Rbac.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rules != nil { + if size, ok := interface{}(m.Rules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Rules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowRules != nil { + if size, ok := interface{}(m.ShadowRules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowRules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ShadowRulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowMatcher != nil { + if size, ok := interface{}(m.ShadowMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackPerRuleStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBACPerRoute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rbac != nil { + l = m.Rbac.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go new file mode 100644 index 000000000..01a368894 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go @@ -0,0 +1,469 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 10] +type Router struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + DynamicStats *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=dynamic_stats,json=dynamicStats,proto3" json:"dynamic_stats,omitempty"` + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + // + // .. attention:: + // + // This field is deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/http/router/v3/router.proto. + StartChildSpan bool `protobuf:"varint,2,opt,name=start_child_span,json=startChildSpan,proto3" json:"start_child_span,omitempty"` + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + UpstreamLog []*v3.AccessLog `protobuf:"bytes,3,rep,name=upstream_log,json=upstreamLog,proto3" json:"upstream_log,omitempty"` + // Additional upstream access log options. + UpstreamLogOptions *Router_UpstreamAccessLogOptions `protobuf:"bytes,9,opt,name=upstream_log_options,json=upstreamLogOptions,proto3" json:"upstream_log_options,omitempty"` + // Do not add any additional “x-envoy-“ headers to requests or responses. This + // only affects the :ref:`router filter generated x-envoy- headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set “x-envoy-“ headers. + SuppressEnvoyHeaders bool `protobuf:"varint,4,opt,name=suppress_envoy_headers,json=suppressEnvoyHeaders,proto3" json:"suppress_envoy_headers,omitempty"` + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + StrictCheckHeaders []string `protobuf:"bytes,5,rep,name=strict_check_headers,json=strictCheckHeaders,proto3" json:"strict_check_headers,omitempty"` + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. + RespectExpectedRqTimeout bool `protobuf:"varint,6,opt,name=respect_expected_rq_timeout,json=respectExpectedRqTimeout,proto3" json:"respect_expected_rq_timeout,omitempty"` + // If set, Envoy will avoid incrementing HTTP failure code stats + // on gRPC requests. This includes the individual status code value + // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). + // This field is useful if interested in relying only on the gRPC + // stats filter to define success and failure metrics for gRPC requests + // as not all failed gRPC requests charge HTTP status code metrics. See + // :ref:`gRPC stats filter` documentation + // for more details. + SuppressGrpcRequestFailureCodeStats bool `protobuf:"varint,7,opt,name=suppress_grpc_request_failure_code_stats,json=suppressGrpcRequestFailureCodeStats,proto3" json:"suppress_grpc_request_failure_code_stats,omitempty"` + // .. note:: + // + // Upstream HTTP filters are currently in alpha. + // + // Optional HTTP filters for the upstream HTTP filter chain. + // + // These filters will be applied for all requests that pass through the router. + // They will also be applied to shadowed requests. + // Upstream HTTP filters cannot change route or cluster. + // Upstream HTTP filters specified on the cluster will override these filters. + // + // If using upstream HTTP filters, please be aware that local errors sent by + // upstream HTTP filters will not trigger retries, and local errors sent by + // upstream HTTP filters will count as a final response if hedging is configured. + // [#extension-category: envoy.filters.http.upstream] + UpstreamHttpFilters []*v31.HttpFilter `protobuf:"bytes,8,rep,name=upstream_http_filters,json=upstreamHttpFilters,proto3" json:"upstream_http_filters,omitempty"` +} + +func (x *Router) Reset() { + *x = Router{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Router) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Router) ProtoMessage() {} + +func (x *Router) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Router.ProtoReflect.Descriptor instead. 
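// Editorial note (not part of the vendored file): a minimal, hypothetical sketch of
// constructing the Router filter config documented above, using only fields present
// in the generated Router message. Per the field comment, DynamicStats already
// defaults to true when unset; it is set explicitly here only to show the
// wrapperspb.BoolValue plumbing. The other values are illustrative.

package main

import (
	"fmt"

	routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	cfg := &routerv3.Router{
		DynamicStats:             wrapperspb.Bool(true), // dynamic cluster stats (default true)
		SuppressEnvoyHeaders:     true,                  // drop router-generated x-envoy- headers
		RespectExpectedRqTimeout: true,                  // honor x-envoy-expected-rq-timeout-ms
	}
	b, err := proto.Marshal(cfg)
	fmt.Println(len(b), err)
}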
+func (*Router) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0} +} + +func (x *Router) GetDynamicStats() *wrapperspb.BoolValue { + if x != nil { + return x.DynamicStats + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/http/router/v3/router.proto. +func (x *Router) GetStartChildSpan() bool { + if x != nil { + return x.StartChildSpan + } + return false +} + +func (x *Router) GetUpstreamLog() []*v3.AccessLog { + if x != nil { + return x.UpstreamLog + } + return nil +} + +func (x *Router) GetUpstreamLogOptions() *Router_UpstreamAccessLogOptions { + if x != nil { + return x.UpstreamLogOptions + } + return nil +} + +func (x *Router) GetSuppressEnvoyHeaders() bool { + if x != nil { + return x.SuppressEnvoyHeaders + } + return false +} + +func (x *Router) GetStrictCheckHeaders() []string { + if x != nil { + return x.StrictCheckHeaders + } + return nil +} + +func (x *Router) GetRespectExpectedRqTimeout() bool { + if x != nil { + return x.RespectExpectedRqTimeout + } + return false +} + +func (x *Router) GetSuppressGrpcRequestFailureCodeStats() bool { + if x != nil { + return x.SuppressGrpcRequestFailureCodeStats + } + return false +} + +func (x *Router) GetUpstreamHttpFilters() []*v31.HttpFilter { + if x != nil { + return x.UpstreamHttpFilters + } + return nil +} + +type Router_UpstreamAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, an upstream access log will be recorded when an upstream stream is + // associated to an http request. Note: Each HTTP request received for an already established + // connection will result in an upstream access log record. This includes, for example, + // consecutive HTTP requests over the same connection or a request that is retried. + // In case a retry is applied, an upstream access log will be recorded for each retry. + FlushUpstreamLogOnUpstreamStream bool `protobuf:"varint,1,opt,name=flush_upstream_log_on_upstream_stream,json=flushUpstreamLogOnUpstreamStream,proto3" json:"flush_upstream_log_on_upstream_stream,omitempty"` + // The interval to flush the upstream access logs. By default, the router will flush an upstream + // access log on stream close, when the HTTP request is complete. If this field is set, the router + // will flush access logs periodically at the specified interval. This is especially useful in the + // case of long-lived requests, such as CONNECT and Websockets. + // The interval must be at least 1 millisecond. 
+ UpstreamLogFlushInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=upstream_log_flush_interval,json=upstreamLogFlushInterval,proto3" json:"upstream_log_flush_interval,omitempty"` +} + +func (x *Router_UpstreamAccessLogOptions) Reset() { + *x = Router_UpstreamAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Router_UpstreamAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Router_UpstreamAccessLogOptions) ProtoMessage() {} + +func (x *Router_UpstreamAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Router_UpstreamAccessLogOptions.ProtoReflect.Descriptor instead. +func (*Router_UpstreamAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Router_UpstreamAccessLogOptions) GetFlushUpstreamLogOnUpstreamStream() bool { + if x != nil { + return x.FlushUpstreamLogOnUpstreamStream + } + return false +} + +func (x *Router_UpstreamAccessLogOptions) GetUpstreamLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.UpstreamLogFlushInterval + } + return nil +} + +var File_envoy_extensions_filters_http_router_v3_router_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x59, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 
0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x08, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x35, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, + 0x68, 0x69, 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x0b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, + 0x67, 0x12, 0x7a, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, + 0x16, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, + 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0xc7, 0x01, 0x0a, 0x14, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x94, 0x01, 0xfa, 0x42, 0x90, 0x01, 0x92, 0x01, 0x8c, 0x01, 0x22, 0x89, 0x01, + 0x72, 0x86, 0x01, 0x52, 0x1e, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, + 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x2d, 0x6d, 0x73, 0x52, 0x26, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x70, 0x65, 0x72, 0x2d, 0x74, 0x72, 0x79, + 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, 0x6d, 0x73, 0x52, 0x13, 0x78, 0x2d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x6d, 0x61, 0x78, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x52, 0x15, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, + 0x67, 0x72, 0x70, 0x63, 0x2d, 0x6f, 0x6e, 0x52, 0x10, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x74, 0x72, 0x69, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, + 0x1b, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x72, 0x71, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x18, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x52, 0x71, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x55, 0x0a, 0x28, + 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x23, + 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x7b, 0x0a, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x13, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x1a, 0xd3, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, + 0x25, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, + 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x66, + 0x0a, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, + 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x18, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x42, 0xa7, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x35, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData = file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc +) + +func file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData +} + +var file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = []interface{}{ + (*Router)(nil), // 0: envoy.extensions.filters.http.router.v3.Router + (*Router_UpstreamAccessLogOptions)(nil), // 1: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*v3.AccessLog)(nil), // 3: envoy.config.accesslog.v3.AccessLog + (*v31.HttpFilter)(nil), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration +} +var file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.router.v3.Router.dynamic_stats:type_name -> google.protobuf.BoolValue + 3, // 1: envoy.extensions.filters.http.router.v3.Router.upstream_log:type_name -> envoy.config.accesslog.v3.AccessLog + 1, // 2: envoy.extensions.filters.http.router.v3.Router.upstream_log_options:type_name -> envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + 4, // 3: 
envoy.extensions.filters.http.router.v3.Router.upstream_http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 5, // 4: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions.upstream_log_flush_interval:type_name -> google.protobuf.Duration + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_router_v3_router_proto_init() } +func file_envoy_extensions_filters_http_router_v3_router_proto_init() { + if File_envoy_extensions_filters_http_router_v3_router_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Router); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Router_UpstreamAccessLogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_router_v3_router_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs, + MessageInfos: file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_router_v3_router_proto = out.File + file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = nil + file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = nil + file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go new file mode 100644 index 000000000..151afcaa3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go @@ -0,0 +1,428 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Router with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Router) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Router with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RouterMultiError, or nil if none found. +func (m *Router) ValidateAll() error { + return m.validate(true) +} + +func (m *Router) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDynamicStats()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicStats()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StartChildSpan + + for idx, item := range m.GetUpstreamLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUpstreamLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SuppressEnvoyHeaders + + for idx, item := range m.GetStrictCheckHeaders() { + _, _ = idx, item + + if _, ok := _Router_StrictCheckHeaders_InLookup[item]; !ok { + err := RouterValidationError{ + field: fmt.Sprintf("StrictCheckHeaders[%v]", idx), + reason: "value must be in list [x-envoy-upstream-rq-timeout-ms x-envoy-upstream-rq-per-try-timeout-ms x-envoy-max-retries x-envoy-retry-grpc-on 
x-envoy-retry-on]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for RespectExpectedRqTimeout + + // no validation rules for SuppressGrpcRequestFailureCodeStats + + for idx, item := range m.GetUpstreamHttpFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouterMultiError(errors) + } + + return nil +} + +// RouterMultiError is an error wrapping multiple validation errors returned by +// Router.ValidateAll() if the designated constraints aren't met. +type RouterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouterMultiError) AllErrors() []error { return m } + +// RouterValidationError is the validation error returned by Router.Validate if +// the designated constraints aren't met. +type RouterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouterValidationError) ErrorName() string { return "RouterValidationError" } + +// Error satisfies the builtin error interface +func (e RouterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouterValidationError{} + +var _Router_StrictCheckHeaders_InLookup = map[string]struct{}{ + "x-envoy-upstream-rq-timeout-ms": {}, + "x-envoy-upstream-rq-per-try-timeout-ms": {}, + "x-envoy-max-retries": {}, + "x-envoy-retry-grpc-on": {}, + "x-envoy-retry-on": {}, +} + +// Validate checks the field values on Router_UpstreamAccessLogOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *Router_UpstreamAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Router_UpstreamAccessLogOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Router_UpstreamAccessLogOptionsMultiError, or nil if none found. +func (m *Router_UpstreamAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Router_UpstreamAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FlushUpstreamLogOnUpstreamStream + + if d := m.GetUpstreamLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Router_UpstreamAccessLogOptionsMultiError(errors) + } + + return nil +} + +// Router_UpstreamAccessLogOptionsMultiError is an error wrapping multiple +// validation errors returned by Router_UpstreamAccessLogOptions.ValidateAll() +// if the designated constraints aren't met. +type Router_UpstreamAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Router_UpstreamAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Router_UpstreamAccessLogOptionsMultiError) AllErrors() []error { return m } + +// Router_UpstreamAccessLogOptionsValidationError is the validation error +// returned by Router_UpstreamAccessLogOptions.Validate if the designated +// constraints aren't met. +type Router_UpstreamAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Router_UpstreamAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Router_UpstreamAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Router_UpstreamAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Router_UpstreamAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Router_UpstreamAccessLogOptionsValidationError) ErrorName() string { + return "Router_UpstreamAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Router_UpstreamAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouter_UpstreamAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Router_UpstreamAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Router_UpstreamAccessLogOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go new file mode 100644 index 000000000..88105c3e1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go @@ -0,0 +1,302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Router_UpstreamAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.UpstreamLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FlushUpstreamLogOnUpstreamStream { + i-- + if m.FlushUpstreamLogOnUpstreamStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Router) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogOptions != nil { + size, err := m.UpstreamLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.UpstreamHttpFilters) > 0 { + for iNdEx := len(m.UpstreamHttpFilters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamHttpFilters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpFilters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.SuppressGrpcRequestFailureCodeStats { + i-- + if m.SuppressGrpcRequestFailureCodeStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RespectExpectedRqTimeout { + i-- + if m.RespectExpectedRqTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.StrictCheckHeaders) > 0 { + for iNdEx := len(m.StrictCheckHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StrictCheckHeaders[iNdEx]) + copy(dAtA[i:], m.StrictCheckHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StrictCheckHeaders[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.SuppressEnvoyHeaders { + i-- + if m.SuppressEnvoyHeaders { + dAtA[i] = 1 + } else 
{ + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.UpstreamLog) > 0 { + for iNdEx := len(m.UpstreamLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.StartChildSpan { + i-- + if m.StartChildSpan { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.DynamicStats != nil { + size, err := (*wrapperspb.BoolValue)(m.DynamicStats).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Router_UpstreamAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FlushUpstreamLogOnUpstreamStream { + n += 2 + } + if m.UpstreamLogFlushInterval != nil { + l = (*durationpb.Duration)(m.UpstreamLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Router) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicStats != nil { + l = (*wrapperspb.BoolValue)(m.DynamicStats).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartChildSpan { + n += 2 + } + if len(m.UpstreamLog) > 0 { + for _, e := range m.UpstreamLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SuppressEnvoyHeaders { + n += 2 + } + if len(m.StrictCheckHeaders) > 0 { + for _, s := range m.StrictCheckHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RespectExpectedRqTimeout { + n += 2 + } + if m.SuppressGrpcRequestFailureCodeStats { + n += 2 + } + if len(m.UpstreamHttpFilters) > 0 { + for _, e := range m.UpstreamHttpFilters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UpstreamLogOptions != nil { + l = m.UpstreamLogOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go new file mode 100644 index 000000000..03cc09123 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -0,0 +1,4304 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3" + v36 "github.com/envoyproxy/go-control-plane/envoy/type/http/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HttpConnectionManager_CodecType int32 + +const ( + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. + HttpConnectionManager_AUTO HttpConnectionManager_CodecType = 0 + // The connection manager will assume that the client is speaking HTTP/1.1. + HttpConnectionManager_HTTP1 HttpConnectionManager_CodecType = 1 + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HttpConnectionManager_HTTP2 HttpConnectionManager_CodecType = 2 + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HttpConnectionManager_HTTP3 HttpConnectionManager_CodecType = 3 +) + +// Enum value maps for HttpConnectionManager_CodecType. 
+var ( + HttpConnectionManager_CodecType_name = map[int32]string{ + 0: "AUTO", + 1: "HTTP1", + 2: "HTTP2", + 3: "HTTP3", + } + HttpConnectionManager_CodecType_value = map[string]int32{ + "AUTO": 0, + "HTTP1": 1, + "HTTP2": 2, + "HTTP3": 3, + } +) + +func (x HttpConnectionManager_CodecType) Enum() *HttpConnectionManager_CodecType { + p := new(HttpConnectionManager_CodecType) + *p = x + return p +} + +func (x HttpConnectionManager_CodecType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_CodecType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[0].Descriptor() +} + +func (HttpConnectionManager_CodecType) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[0] +} + +func (x HttpConnectionManager_CodecType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_CodecType.Descriptor instead. +func (HttpConnectionManager_CodecType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0} +} + +type HttpConnectionManager_ServerHeaderTransformation int32 + +const ( + // Overwrite any Server header with the contents of server_name. + HttpConnectionManager_OVERWRITE HttpConnectionManager_ServerHeaderTransformation = 0 + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + HttpConnectionManager_APPEND_IF_ABSENT HttpConnectionManager_ServerHeaderTransformation = 1 + // Pass through the value of the server header, and do not append a header + // if none is present. + HttpConnectionManager_PASS_THROUGH HttpConnectionManager_ServerHeaderTransformation = 2 +) + +// Enum value maps for HttpConnectionManager_ServerHeaderTransformation. +var ( + HttpConnectionManager_ServerHeaderTransformation_name = map[int32]string{ + 0: "OVERWRITE", + 1: "APPEND_IF_ABSENT", + 2: "PASS_THROUGH", + } + HttpConnectionManager_ServerHeaderTransformation_value = map[string]int32{ + "OVERWRITE": 0, + "APPEND_IF_ABSENT": 1, + "PASS_THROUGH": 2, + } +) + +func (x HttpConnectionManager_ServerHeaderTransformation) Enum() *HttpConnectionManager_ServerHeaderTransformation { + p := new(HttpConnectionManager_ServerHeaderTransformation) + *p = x + return p +} + +func (x HttpConnectionManager_ServerHeaderTransformation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_ServerHeaderTransformation) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[1].Descriptor() +} + +func (HttpConnectionManager_ServerHeaderTransformation) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[1] +} + +func (x HttpConnectionManager_ServerHeaderTransformation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_ServerHeaderTransformation.Descriptor instead. 
+func (HttpConnectionManager_ServerHeaderTransformation) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 1} +} + +// How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP +// header. +type HttpConnectionManager_ForwardClientCertDetails int32 + +const ( + // Do not send the XFCC header to the next hop. This is the default value. + HttpConnectionManager_SANITIZE HttpConnectionManager_ForwardClientCertDetails = 0 + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + HttpConnectionManager_FORWARD_ONLY HttpConnectionManager_ForwardClientCertDetails = 1 + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + HttpConnectionManager_APPEND_FORWARD HttpConnectionManager_ForwardClientCertDetails = 2 + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + HttpConnectionManager_SANITIZE_SET HttpConnectionManager_ForwardClientCertDetails = 3 + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + HttpConnectionManager_ALWAYS_FORWARD_ONLY HttpConnectionManager_ForwardClientCertDetails = 4 +) + +// Enum value maps for HttpConnectionManager_ForwardClientCertDetails. +var ( + HttpConnectionManager_ForwardClientCertDetails_name = map[int32]string{ + 0: "SANITIZE", + 1: "FORWARD_ONLY", + 2: "APPEND_FORWARD", + 3: "SANITIZE_SET", + 4: "ALWAYS_FORWARD_ONLY", + } + HttpConnectionManager_ForwardClientCertDetails_value = map[string]int32{ + "SANITIZE": 0, + "FORWARD_ONLY": 1, + "APPEND_FORWARD": 2, + "SANITIZE_SET": 3, + "ALWAYS_FORWARD_ONLY": 4, + } +) + +func (x HttpConnectionManager_ForwardClientCertDetails) Enum() *HttpConnectionManager_ForwardClientCertDetails { + p := new(HttpConnectionManager_ForwardClientCertDetails) + *p = x + return p +} + +func (x HttpConnectionManager_ForwardClientCertDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_ForwardClientCertDetails) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[2].Descriptor() +} + +func (HttpConnectionManager_ForwardClientCertDetails) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[2] +} + +func (x HttpConnectionManager_ForwardClientCertDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_ForwardClientCertDetails.Descriptor instead. +func (HttpConnectionManager_ForwardClientCertDetails) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 2} +} + +// Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. +// This operation occurs before URL normalization and the merge slashes transformations if they were enabled. +type HttpConnectionManager_PathWithEscapedSlashesAction int32 + +const ( + // Default behavior specific to implementation (i.e. Envoy) of this configuration option. + // Envoy, by default, takes the KEEP_UNCHANGED action. 
+ // NOTE: the implementation may change the default behavior at-will. + HttpConnectionManager_IMPLEMENTATION_SPECIFIC_DEFAULT HttpConnectionManager_PathWithEscapedSlashesAction = 0 + // Keep escaped slashes. + HttpConnectionManager_KEEP_UNCHANGED HttpConnectionManager_PathWithEscapedSlashesAction = 1 + // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. + // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. + HttpConnectionManager_REJECT_REQUEST HttpConnectionManager_PathWithEscapedSlashesAction = 2 + // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. + // Redirect occurs after path normalization and merge slashes transformations if they were configured. + // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. + // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to + // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. + // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each + // redirected request. + HttpConnectionManager_UNESCAPE_AND_REDIRECT HttpConnectionManager_PathWithEscapedSlashesAction = 3 + // Unescape %2F and %5C sequences. + // Note: this option should not be enabled if intermediaries perform path based access control as + // it may lead to path confusion vulnerabilities. + HttpConnectionManager_UNESCAPE_AND_FORWARD HttpConnectionManager_PathWithEscapedSlashesAction = 4 +) + +// Enum value maps for HttpConnectionManager_PathWithEscapedSlashesAction. +var ( + HttpConnectionManager_PathWithEscapedSlashesAction_name = map[int32]string{ + 0: "IMPLEMENTATION_SPECIFIC_DEFAULT", + 1: "KEEP_UNCHANGED", + 2: "REJECT_REQUEST", + 3: "UNESCAPE_AND_REDIRECT", + 4: "UNESCAPE_AND_FORWARD", + } + HttpConnectionManager_PathWithEscapedSlashesAction_value = map[string]int32{ + "IMPLEMENTATION_SPECIFIC_DEFAULT": 0, + "KEEP_UNCHANGED": 1, + "REJECT_REQUEST": 2, + "UNESCAPE_AND_REDIRECT": 3, + "UNESCAPE_AND_FORWARD": 4, + } +) + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) Enum() *HttpConnectionManager_PathWithEscapedSlashesAction { + p := new(HttpConnectionManager_PathWithEscapedSlashesAction) + *p = x + return p +} + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_PathWithEscapedSlashesAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[3].Descriptor() +} + +func (HttpConnectionManager_PathWithEscapedSlashesAction) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[3] +} + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_PathWithEscapedSlashesAction.Descriptor instead. 
+func (HttpConnectionManager_PathWithEscapedSlashesAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 3} +} + +type HttpConnectionManager_Tracing_OperationName int32 + +const ( + // The HTTP listener is used for ingress/incoming requests. + HttpConnectionManager_Tracing_INGRESS HttpConnectionManager_Tracing_OperationName = 0 + // The HTTP listener is used for egress/outgoing requests. + HttpConnectionManager_Tracing_EGRESS HttpConnectionManager_Tracing_OperationName = 1 +) + +// Enum value maps for HttpConnectionManager_Tracing_OperationName. +var ( + HttpConnectionManager_Tracing_OperationName_name = map[int32]string{ + 0: "INGRESS", + 1: "EGRESS", + } + HttpConnectionManager_Tracing_OperationName_value = map[string]int32{ + "INGRESS": 0, + "EGRESS": 1, + } +) + +func (x HttpConnectionManager_Tracing_OperationName) Enum() *HttpConnectionManager_Tracing_OperationName { + p := new(HttpConnectionManager_Tracing_OperationName) + *p = x + return p +} + +func (x HttpConnectionManager_Tracing_OperationName) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_Tracing_OperationName) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[4].Descriptor() +} + +func (HttpConnectionManager_Tracing_OperationName) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[4] +} + +func (x HttpConnectionManager_Tracing_OperationName) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_Tracing_OperationName.Descriptor instead. +func (HttpConnectionManager_Tracing_OperationName) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// [#next-free-field: 59] +type HttpConnectionManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Supplies the type of codec that the connection manager should use. + CodecType HttpConnectionManager_CodecType `protobuf:"varint,1,opt,name=codec_type,json=codecType,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_CodecType" json:"codec_type,omitempty"` + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + StatPrefix string `protobuf:"bytes,2,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // Types that are assignable to RouteSpecifier: + // + // *HttpConnectionManager_Rds + // *HttpConnectionManager_RouteConfig + // *HttpConnectionManager_ScopedRoutes + RouteSpecifier isHttpConnectionManager_RouteSpecifier `protobuf_oneof:"route_specifier"` + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. 
+ HttpFilters []*HttpFilter `protobuf:"bytes,5,rep,name=http_filters,json=httpFilters,proto3" json:"http_filters,omitempty"` + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + AddUserAgent *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=add_user_agent,json=addUserAgent,proto3" json:"add_user_agent,omitempty"` + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing *HttpConnectionManager_Tracing `protobuf:"bytes,7,opt,name=tracing,proto3" json:"tracing,omitempty"` + // Additional settings for HTTP requests handled by the connection manager. These will be + // applicable to both HTTP1 and HTTP2 requests. + CommonHttpProtocolOptions *v3.HttpProtocolOptions `protobuf:"bytes,35,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"` + // If set to true, Envoy will not start a drain timer for downstream HTTP1 connections after + // :ref:`common_http_protocol_options.max_connection_duration + // ` passes. + // Instead, Envoy will wait for the next downstream request, add connection:close to the response + // headers, then close the connection after the stream ends. + // + // This behavior is compliant with `RFC 9112 section 9.6 `_ + // + // If set to false, “max_connection_duration“ will cause Envoy to enter the normal drain + // sequence for HTTP1 with Envoy eventually closing the connection (once there are no active + // streams). + // + // Has no effect if “max_connection_duration“ is unset. Defaults to false. + Http1SafeMaxConnectionDuration bool `protobuf:"varint,58,opt,name=http1_safe_max_connection_duration,json=http1SafeMaxConnectionDuration,proto3" json:"http1_safe_max_connection_duration,omitempty"` + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + // [#comment:TODO: The following fields are ignored when the + // :ref:`header validation configuration ` + // is present: + // 1. :ref:`allow_chunked_length `] + HttpProtocolOptions *v3.Http1ProtocolOptions `protobuf:"bytes,8,opt,name=http_protocol_options,json=httpProtocolOptions,proto3" json:"http_protocol_options,omitempty"` + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + Http2ProtocolOptions *v3.Http2ProtocolOptions `protobuf:"bytes,9,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` + // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. + // [#not-implemented-hide:] + Http3ProtocolOptions *v3.Http3ProtocolOptions `protobuf:"bytes,44,opt,name=http3_protocol_options,json=http3ProtocolOptions,proto3" json:"http3_protocol_options,omitempty"` + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is “envoy“. + ServerName string `protobuf:"bytes,10,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"` + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. 
+ ServerHeaderTransformation HttpConnectionManager_ServerHeaderTransformation `protobuf:"varint,34,opt,name=server_header_transformation,json=serverHeaderTransformation,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_ServerHeaderTransformation" json:"server_header_transformation,omitempty"` + // Allows for explicit transformation of the :scheme header on the request path. + // If not set, Envoy's default :ref:`scheme ` + // handling applies. + SchemeHeaderTransformation *v3.SchemeHeaderTransformation `protobuf:"bytes,48,opt,name=scheme_header_transformation,json=schemeHeaderTransformation,proto3" json:"scheme_header_transformation,omitempty"` + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + MaxRequestHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"` + // The stream idle timeout for connections managed by the connection manager. + // If not specified, this defaults to 5 minutes. The default value was selected + // so as not to interfere with any smaller configured timeouts that may have + // existed in configurations prior to the introduction of this feature, while + // introducing robustness to TCP connections that terminate without a FIN. + // + // This idle timeout applies to new streams and is overridable by the + // :ref:`route-level idle_timeout + // `. Even on a stream in + // which the override applies, prior to receipt of the initial request + // headers, the :ref:`stream_idle_timeout + // ` + // applies. Each time an encode/decode event for headers or data is processed + // for the stream, the timer will be reset. If the timeout fires, the stream + // is terminated with a 408 Request Timeout error code if no upstream response + // header has been received, otherwise a stream reset occurs. + // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled according to the value for + // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + // + // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + // to the granularity of events presented to the connection manager. For example, while receiving + // very large request headers, it may be the case that there is traffic regularly arriving on the + // wire while the connection manage is only able to observe the end-of-headers event, hence the + // stream may still idle timeout. + // + // A value of 0 will completely disable the connection manager stream idle + // timeout, although per-route idle timeout overrides will continue to apply. 
+ StreamIdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + // The amount of time that Envoy will wait for the entire request to be received. + // The timer is activated when the request is initiated, and is disarmed when the last byte of the + // request is sent upstream (i.e. all decoding filters have processed the request), OR when the + // response is initiated. If not specified or set to 0, this timeout is disabled. + RequestTimeout *durationpb.Duration `protobuf:"bytes,28,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + // The amount of time that Envoy will wait for the request headers to be received. The timer is + // activated when the first byte of the headers is received, and is disarmed when the last byte of + // the headers has been received. If not specified or set to 0, this timeout is disabled. + RequestHeadersTimeout *durationpb.Duration `protobuf:"bytes,41,opt,name=request_headers_timeout,json=requestHeadersTimeout,proto3" json:"request_headers_timeout,omitempty"` + // The time that Envoy will wait between sending an HTTP/2 “shutdown + // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // either when a connection hits the idle timeout, when :ref:`max_connection_duration + // ` + // is reached, or during general server draining. The default grace period is + // 5000 milliseconds (5 seconds) if this option is not specified. + DrainTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=drain_timeout,json=drainTimeout,proto3" json:"drain_timeout,omitempty"` + // The delayed close timeout is for downstream connections managed by the HTTP connection manager. + // It is defined as a grace period after connection close processing has been locally initiated + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. + // + // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + // sequence mitigates a race condition that exists when downstream clients do not drain/process + // data in a connection's receive buffer after a remote close has been detected via a socket + // write(). This race leads to such clients failing to process the response code sent by Envoy, + // which could result in erroneous downstream processing. + // + // If the timeout triggers, Envoy will close the connection's socket. + // + // The default timeout is 1000 ms if this option is not specified. + // + // .. 
NOTE:: + // + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. + DelayedCloseTimeout *durationpb.Duration `protobuf:"bytes,26,opt,name=delayed_close_timeout,json=delayedCloseTimeout,proto3" json:"delayed_close_timeout,omitempty"` + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. + AccessLog []*v31.AccessLog `protobuf:"bytes,13,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The interval to flush the above access logs. + // + // .. attention:: + // + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. + // Note that if both this field and :ref:`access_log_flush_interval + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,54,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log once when a new HTTP request is received, after the request + // headers have been evaluated, and before iterating through the HTTP filter chain. + // + // .. attention:: + // + // This field is deprecated in favor of + // :ref:`flush_access_log_on_new_request + // `. + // Note that if both this field and :ref:`flush_access_log_on_new_request + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. + FlushAccessLogOnNewRequest bool `protobuf:"varint,55,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // Additional access log options for HTTP connection manager. + AccessLogOptions *HttpConnectionManager_HcmAccessLogOptions `protobuf:"bytes,56,opt,name=access_log_options,json=accessLogOptions,proto3" json:"access_log_options,omitempty"` + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + UseRemoteAddress *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=use_remote_address,json=useRemoteAddress,proto3" json:"use_remote_address,omitempty"` + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. 
See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. + XffNumTrustedHops uint32 `protobuf:"varint,19,opt,name=xff_num_trusted_hops,json=xffNumTrustedHops,proto3" json:"xff_num_trusted_hops,omitempty"` + // The configuration for the original IP detection extensions. + // + // When configured the extensions will be called along with the request headers + // and information about the downstream connection, such as the directly connected address. + // Each extension will then use these parameters to decide the request's effective remote address. + // If an extension fails to detect the original IP address and isn't configured to reject + // the request, the HCM will try the remaining extensions until one succeeds or rejects + // the request. If the request isn't rejected nor any extension succeeds, the HCM will + // fallback to using the remote address. + // + // .. WARNING:: + // + // Extensions cannot be used in conjunction with :ref:`use_remote_address + // ` + // nor :ref:`xff_num_trusted_hops + // `. + // + // [#extension-category: envoy.http.original_ip_detection] + OriginalIpDetectionExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,46,rep,name=original_ip_detection_extensions,json=originalIpDetectionExtensions,proto3" json:"original_ip_detection_extensions,omitempty"` + // The configuration for the early header mutation extensions. + // + // When configured the extensions will be called before any routing, tracing, or any filter processing. + // Each extension will be applied in the order they are configured. + // If the same header is mutated by multiple extensions, then the last extension will win. + // + // [#extension-category: envoy.http.early_header_mutation] + EarlyHeaderMutationExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,52,rep,name=early_header_mutation_extensions,json=earlyHeaderMutationExtensions,proto3" json:"early_header_mutation_extensions,omitempty"` + // Configures what network addresses are considered internal for stats and header sanitation + // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. + // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information about internal/external addresses. + InternalAddressConfig *HttpConnectionManager_InternalAddressConfig `protobuf:"bytes,25,opt,name=internal_address_config,json=internalAddressConfig,proto3" json:"internal_address_config,omitempty"` + // If set, Envoy will not append the remote address to the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + // has mutated the request headers. While :ref:`use_remote_address + // ` + // will also suppress XFF addition, it has consequences for logging and other + // Envoy uses of the remote address, so “skip_xff_append“ should be used + // when only an elision of XFF addition is intended. + SkipXffAppend bool `protobuf:"varint,21,opt,name=skip_xff_append,json=skipXffAppend,proto3" json:"skip_xff_append,omitempty"` + // Via header value to append to request and response headers. If this is + // empty, no via header will be appended. + Via string `protobuf:"bytes,22,opt,name=via,proto3" json:"via,omitempty"` + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. 
Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + GenerateRequestId *wrapperspb.BoolValue `protobuf:"bytes,15,opt,name=generate_request_id,json=generateRequestId,proto3" json:"generate_request_id,omitempty"` + // Whether the connection manager will keep the :ref:`x-request-id + // ` header if passed for a request that is edge + // (Edge request is the request from external clients to front Envoy) and not reset it, which + // is the current Envoy behaviour. This defaults to false. + PreserveExternalRequestId bool `protobuf:"varint,32,opt,name=preserve_external_request_id,json=preserveExternalRequestId,proto3" json:"preserve_external_request_id,omitempty"` + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + AlwaysSetRequestIdInResponse bool `protobuf:"varint,37,opt,name=always_set_request_id_in_response,json=alwaysSetRequestIdInResponse,proto3" json:"always_set_request_id_in_response,omitempty"` + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + ForwardClientCertDetails HttpConnectionManager_ForwardClientCertDetails `protobuf:"varint,16,opt,name=forward_client_cert_details,json=forwardClientCertDetails,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_ForwardClientCertDetails" json:"forward_client_cert_details,omitempty"` + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, “Hash“ is always set, and + // “By“ is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails *HttpConnectionManager_SetCurrentClientCertDetails `protobuf:"bytes,17,opt,name=set_current_client_cert_details,json=setCurrentClientCertDetails,proto3" json:"set_current_client_cert_details,omitempty"` + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + Proxy_100Continue bool `protobuf:"varint,18,opt,name=proxy_100_continue,json=proxy100Continue,proto3" json:"proxy_100_continue,omitempty"` + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to “x-forwarded-for“. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. 
+ // [#not-implemented-hide:] + RepresentIpv4RemoteAddressAsIpv4MappedIpv6 bool `protobuf:"varint,20,opt,name=represent_ipv4_remote_address_as_ipv4_mapped_ipv6,json=representIpv4RemoteAddressAsIpv4MappedIpv6,proto3" json:"represent_ipv4_remote_address_as_ipv4_mapped_ipv6,omitempty"` + UpgradeConfigs []*HttpConnectionManager_UpgradeConfig `protobuf:"bytes,23,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream “:path“ header + // as well. For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison `_ + // for details of normalization. + // Note that Envoy does not perform + // `case normalization `_ + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + NormalizePath *wrapperspb.BoolValue `protobuf:"bytes,30,opt,name=normalize_path,json=normalizePath,proto3" json:"normalize_path,omitempty"` + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream “:path“ header as well. Without + // setting this option, incoming requests with path “//dir///file“ will not match against route + // with “prefix“ match set to “/dir“. Defaults to “false“. Note that slash merging is not part of + // `HTTP spec `_ and is provided for convenience. + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + MergeSlashes bool `protobuf:"varint,33,opt,name=merge_slashes,json=mergeSlashes,proto3" json:"merge_slashes,omitempty"` + // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). + // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` + // runtime variable. + // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime + // variable can be used to apply the action to a portion of all requests. + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + PathWithEscapedSlashesAction HttpConnectionManager_PathWithEscapedSlashesAction `protobuf:"varint,45,opt,name=path_with_escaped_slashes_action,json=pathWithEscapedSlashesAction,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_PathWithEscapedSlashesAction" json:"path_with_escaped_slashes_action,omitempty"` + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. If empty, the + // :ref:`UuidRequestIdConfig ` + // default extension is used with default parameters. See the documentation for that extension + // for details on what it does. Customizing the configuration for the default extension can be + // achieved by configuring it explicitly here. For example, to disable trace reason packing, + // the following configuration can be used: + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + // + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig + // pack_trace_reason: false + // + // [#extension-category: envoy.request_id] + RequestIdExtension *RequestIDExtension `protobuf:"bytes,36,opt,name=request_id_extension,json=requestIdExtension,proto3" json:"request_id_extension,omitempty"` + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig *LocalReplyConfig `protobuf:"bytes,38,opt,name=local_reply_config,json=localReplyConfig,proto3" json:"local_reply_config,omitempty"` + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port. This affects the upstream host header unless the method is + // CONNECT in which case if no filter adds a port the original port will be restored before headers are + // sent upstream. + // Without setting this option, incoming requests with host “example:443“ will not match against + // route with :ref:`domains` match set to “example“. Defaults to “false“. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + // Only one of “strip_matching_host_port“ or “strip_any_host_port“ can be set. + StripMatchingHostPort bool `protobuf:"varint,39,opt,name=strip_matching_host_port,json=stripMatchingHostPort,proto3" json:"strip_matching_host_port,omitempty"` + // Types that are assignable to StripPortMode: + // + // *HttpConnectionManager_StripAnyHostPort + StripPortMode isHttpConnectionManager_StripPortMode `protobuf_oneof:"strip_port_mode"` + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message + // ` or the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // “not“ the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + StreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,40,opt,name=stream_error_on_invalid_http_message,json=streamErrorOnInvalidHttpMessage,proto3" json:"stream_error_on_invalid_http_message,omitempty"` + // [#not-implemented-hide:] Path normalization configuration. This includes + // configurations for transformations (e.g. RFC 3986 normalization or merge + // adjacent slashes) and the policy to apply them. 
The policy determines + // whether transformations affect the forwarded “:path“ header. RFC 3986 path + // normalization is enabled by default and the default policy is that the + // normalized header will be forwarded. See :ref:`PathNormalizationOptions + // ` + // for details. + PathNormalizationOptions *HttpConnectionManager_PathNormalizationOptions `protobuf:"bytes,43,opt,name=path_normalization_options,json=pathNormalizationOptions,proto3" json:"path_normalization_options,omitempty"` + // Determines if trailing dot of the host should be removed from host/authority header before any + // processing of request by HTTP filters or routing. + // This affects the upstream host header. + // Without setting this option, incoming requests with host “example.com.“ will not match against + // route with :ref:`domains` match set to “example.com“. Defaults to “false“. + // When the incoming request contains a host/authority header that includes a port number, + // setting this option will strip a trailing dot, if present, from the host section, + // leaving the port as is (e.g. host value “example.com.:443“ will be updated to “example.com:443“). + StripTrailingHostDot bool `protobuf:"varint,47,opt,name=strip_trailing_host_dot,json=stripTrailingHostDot,proto3" json:"strip_trailing_host_dot,omitempty"` + // Proxy-Status HTTP response header configuration. + // If this config is set, the Proxy-Status HTTP response header field is + // populated. By default, it is not. + ProxyStatusConfig *HttpConnectionManager_ProxyStatusConfig `protobuf:"bytes,49,opt,name=proxy_status_config,json=proxyStatusConfig,proto3" json:"proxy_status_config,omitempty"` + // Configuration options for Header Validation (UHV). + // UHV is an extensible mechanism for checking validity of HTTP requests as well as providing + // normalization for request attributes, such as URI path. + // If the typed_header_validation_config is present it overrides the following options: + // “normalize_path“, “merge_slashes“, “path_with_escaped_slashes_action“ + // “http_protocol_options.allow_chunked_length“, “common_http_protocol_options.headers_with_underscores_action“. + // + // The default UHV checks the following: + // + // #. HTTP/1 header map validity according to `RFC 7230 section 3.2`_ + // #. Syntax of HTTP/1 request target URI and response status + // #. HTTP/2 header map validity according to `RFC 7540 section 8.1.2`_ + // #. Syntax of HTTP/3 pseudo headers + // #. Syntax of “Content-Length“ and “Transfer-Encoding“ + // #. Validation of HTTP/1 requests with both “Content-Length“ and “Transfer-Encoding“ headers + // #. Normalization of the URI path according to `Normalization and Comparison `_ + // + // without `case normalization `_ + // + // [#not-implemented-hide:] + // [#extension-category: envoy.http.header_validators] + TypedHeaderValidationConfig *v3.TypedExtensionConfig `protobuf:"bytes,50,opt,name=typed_header_validation_config,json=typedHeaderValidationConfig,proto3" json:"typed_header_validation_config,omitempty"` + // Append the “x-forwarded-port“ header with the port value client used to connect to Envoy. It + // will be ignored if the “x-forwarded-port“ header has been set by any trusted proxy in front of Envoy. + AppendXForwardedPort bool `protobuf:"varint,51,opt,name=append_x_forwarded_port,json=appendXForwardedPort,proto3" json:"append_x_forwarded_port,omitempty"` + // Append the :ref:`config_http_conn_man_headers_x-envoy-local-overloaded` HTTP header in the scenario where + // the Overload Manager has been triggered. 
+ AppendLocalOverload bool `protobuf:"varint,57,opt,name=append_local_overload,json=appendLocalOverload,proto3" json:"append_local_overload,omitempty"` + // Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to “true“. + // This should be set to “false“ in cases where Envoy's view of the downstream address may not correspond to the + // actual client address, for example, if there's another proxy in front of the Envoy. + AddProxyProtocolConnectionState *wrapperspb.BoolValue `protobuf:"bytes,53,opt,name=add_proxy_protocol_connection_state,json=addProxyProtocolConnectionState,proto3" json:"add_proxy_protocol_connection_state,omitempty"` +} + +func (x *HttpConnectionManager) Reset() { + *x = HttpConnectionManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager) ProtoMessage() {} + +func (x *HttpConnectionManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpConnectionManager) GetCodecType() HttpConnectionManager_CodecType { + if x != nil { + return x.CodecType + } + return HttpConnectionManager_AUTO +} + +func (x *HttpConnectionManager) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (m *HttpConnectionManager) GetRouteSpecifier() isHttpConnectionManager_RouteSpecifier { + if m != nil { + return m.RouteSpecifier + } + return nil +} + +func (x *HttpConnectionManager) GetRds() *Rds { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_Rds); ok { + return x.Rds + } + return nil +} + +func (x *HttpConnectionManager) GetRouteConfig() *v32.RouteConfiguration { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_RouteConfig); ok { + return x.RouteConfig + } + return nil +} + +func (x *HttpConnectionManager) GetScopedRoutes() *ScopedRoutes { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_ScopedRoutes); ok { + return x.ScopedRoutes + } + return nil +} + +func (x *HttpConnectionManager) GetHttpFilters() []*HttpFilter { + if x != nil { + return x.HttpFilters + } + return nil +} + +func (x *HttpConnectionManager) GetAddUserAgent() *wrapperspb.BoolValue { + if x != nil { + return x.AddUserAgent + } + return nil +} + +func (x *HttpConnectionManager) GetTracing() *HttpConnectionManager_Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *HttpConnectionManager) GetCommonHttpProtocolOptions() *v3.HttpProtocolOptions { + if x != nil { + return x.CommonHttpProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp1SafeMaxConnectionDuration() bool { + if x != nil { + return x.Http1SafeMaxConnectionDuration + } + return false +} + +func (x 
*HttpConnectionManager) GetHttpProtocolOptions() *v3.Http1ProtocolOptions { + if x != nil { + return x.HttpProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp2ProtocolOptions() *v3.Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp3ProtocolOptions() *v3.Http3ProtocolOptions { + if x != nil { + return x.Http3ProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetServerName() string { + if x != nil { + return x.ServerName + } + return "" +} + +func (x *HttpConnectionManager) GetServerHeaderTransformation() HttpConnectionManager_ServerHeaderTransformation { + if x != nil { + return x.ServerHeaderTransformation + } + return HttpConnectionManager_OVERWRITE +} + +func (x *HttpConnectionManager) GetSchemeHeaderTransformation() *v3.SchemeHeaderTransformation { + if x != nil { + return x.SchemeHeaderTransformation + } + return nil +} + +func (x *HttpConnectionManager) GetMaxRequestHeadersKb() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestHeadersKb + } + return nil +} + +func (x *HttpConnectionManager) GetStreamIdleTimeout() *durationpb.Duration { + if x != nil { + return x.StreamIdleTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetRequestTimeout() *durationpb.Duration { + if x != nil { + return x.RequestTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetRequestHeadersTimeout() *durationpb.Duration { + if x != nil { + return x.RequestHeadersTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetDrainTimeout() *durationpb.Duration { + if x != nil { + return x.DrainTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetDelayedCloseTimeout() *durationpb.Duration { + if x != nil { + return x.DelayedCloseTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetAccessLog() []*v31.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. +func (x *HttpConnectionManager) GetAccessLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. 
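
The duration fields documented above (stream_idle_timeout, request_timeout, request_headers_timeout, drain_timeout, delayed_close_timeout) are plain durationpb values on the generated struct. As a rough sketch of how a consumer of this generated package might populate them — the import path assumes the canonical go-control-plane module rather than a vendored copy, and the values are illustrative only:

package example

import (
	"time"

	hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

// timeoutsSketch fills in the timeout fields documented above with
// illustrative values; an unset or zero duration disables the corresponding
// timeout, except drain_timeout and delayed_close_timeout, whose documented
// defaults are 5s and 1s respectively.
func timeoutsSketch() *hcmv3.HttpConnectionManager {
	return &hcmv3.HttpConnectionManager{
		StatPrefix:            "ingress_http",
		StreamIdleTimeout:     durationpb.New(5 * time.Minute),
		RequestTimeout:        durationpb.New(30 * time.Second),
		RequestHeadersTimeout: durationpb.New(10 * time.Second),
		DrainTimeout:          durationpb.New(5 * time.Second),
		DelayedCloseTimeout:   durationpb.New(1 * time.Second),
	}
}
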
+func (x *HttpConnectionManager) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager) GetAccessLogOptions() *HttpConnectionManager_HcmAccessLogOptions { + if x != nil { + return x.AccessLogOptions + } + return nil +} + +func (x *HttpConnectionManager) GetUseRemoteAddress() *wrapperspb.BoolValue { + if x != nil { + return x.UseRemoteAddress + } + return nil +} + +func (x *HttpConnectionManager) GetXffNumTrustedHops() uint32 { + if x != nil { + return x.XffNumTrustedHops + } + return 0 +} + +func (x *HttpConnectionManager) GetOriginalIpDetectionExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.OriginalIpDetectionExtensions + } + return nil +} + +func (x *HttpConnectionManager) GetEarlyHeaderMutationExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.EarlyHeaderMutationExtensions + } + return nil +} + +func (x *HttpConnectionManager) GetInternalAddressConfig() *HttpConnectionManager_InternalAddressConfig { + if x != nil { + return x.InternalAddressConfig + } + return nil +} + +func (x *HttpConnectionManager) GetSkipXffAppend() bool { + if x != nil { + return x.SkipXffAppend + } + return false +} + +func (x *HttpConnectionManager) GetVia() string { + if x != nil { + return x.Via + } + return "" +} + +func (x *HttpConnectionManager) GetGenerateRequestId() *wrapperspb.BoolValue { + if x != nil { + return x.GenerateRequestId + } + return nil +} + +func (x *HttpConnectionManager) GetPreserveExternalRequestId() bool { + if x != nil { + return x.PreserveExternalRequestId + } + return false +} + +func (x *HttpConnectionManager) GetAlwaysSetRequestIdInResponse() bool { + if x != nil { + return x.AlwaysSetRequestIdInResponse + } + return false +} + +func (x *HttpConnectionManager) GetForwardClientCertDetails() HttpConnectionManager_ForwardClientCertDetails { + if x != nil { + return x.ForwardClientCertDetails + } + return HttpConnectionManager_SANITIZE +} + +func (x *HttpConnectionManager) GetSetCurrentClientCertDetails() *HttpConnectionManager_SetCurrentClientCertDetails { + if x != nil { + return x.SetCurrentClientCertDetails + } + return nil +} + +func (x *HttpConnectionManager) GetProxy_100Continue() bool { + if x != nil { + return x.Proxy_100Continue + } + return false +} + +func (x *HttpConnectionManager) GetRepresentIpv4RemoteAddressAsIpv4MappedIpv6() bool { + if x != nil { + return x.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 + } + return false +} + +func (x *HttpConnectionManager) GetUpgradeConfigs() []*HttpConnectionManager_UpgradeConfig { + if x != nil { + return x.UpgradeConfigs + } + return nil +} + +func (x *HttpConnectionManager) GetNormalizePath() *wrapperspb.BoolValue { + if x != nil { + return x.NormalizePath + } + return nil +} + +func (x *HttpConnectionManager) GetMergeSlashes() bool { + if x != nil { + return x.MergeSlashes + } + return false +} + +func (x *HttpConnectionManager) GetPathWithEscapedSlashesAction() HttpConnectionManager_PathWithEscapedSlashesAction { + if x != nil { + return x.PathWithEscapedSlashesAction + } + return HttpConnectionManager_IMPLEMENTATION_SPECIFIC_DEFAULT +} + +func (x *HttpConnectionManager) GetRequestIdExtension() *RequestIDExtension { + if x != nil { + return x.RequestIdExtension + } + return nil +} + +func (x *HttpConnectionManager) GetLocalReplyConfig() *LocalReplyConfig { + if x != nil { + return x.LocalReplyConfig + } + return nil +} + +func (x *HttpConnectionManager) GetStripMatchingHostPort() bool { + if x != 
nil { + return x.StripMatchingHostPort + } + return false +} + +func (m *HttpConnectionManager) GetStripPortMode() isHttpConnectionManager_StripPortMode { + if m != nil { + return m.StripPortMode + } + return nil +} + +func (x *HttpConnectionManager) GetStripAnyHostPort() bool { + if x, ok := x.GetStripPortMode().(*HttpConnectionManager_StripAnyHostPort); ok { + return x.StripAnyHostPort + } + return false +} + +func (x *HttpConnectionManager) GetStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.StreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *HttpConnectionManager) GetPathNormalizationOptions() *HttpConnectionManager_PathNormalizationOptions { + if x != nil { + return x.PathNormalizationOptions + } + return nil +} + +func (x *HttpConnectionManager) GetStripTrailingHostDot() bool { + if x != nil { + return x.StripTrailingHostDot + } + return false +} + +func (x *HttpConnectionManager) GetProxyStatusConfig() *HttpConnectionManager_ProxyStatusConfig { + if x != nil { + return x.ProxyStatusConfig + } + return nil +} + +func (x *HttpConnectionManager) GetTypedHeaderValidationConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.TypedHeaderValidationConfig + } + return nil +} + +func (x *HttpConnectionManager) GetAppendXForwardedPort() bool { + if x != nil { + return x.AppendXForwardedPort + } + return false +} + +func (x *HttpConnectionManager) GetAppendLocalOverload() bool { + if x != nil { + return x.AppendLocalOverload + } + return false +} + +func (x *HttpConnectionManager) GetAddProxyProtocolConnectionState() *wrapperspb.BoolValue { + if x != nil { + return x.AddProxyProtocolConnectionState + } + return nil +} + +type isHttpConnectionManager_RouteSpecifier interface { + isHttpConnectionManager_RouteSpecifier() +} + +type HttpConnectionManager_Rds struct { + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds *Rds `protobuf:"bytes,3,opt,name=rds,proto3,oneof"` +} + +type HttpConnectionManager_RouteConfig struct { + // The route table for the connection manager is static and is specified in this property. + RouteConfig *v32.RouteConfiguration `protobuf:"bytes,4,opt,name=route_config,json=routeConfig,proto3,oneof"` +} + +type HttpConnectionManager_ScopedRoutes struct { + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes *ScopedRoutes `protobuf:"bytes,31,opt,name=scoped_routes,json=scopedRoutes,proto3,oneof"` +} + +func (*HttpConnectionManager_Rds) isHttpConnectionManager_RouteSpecifier() {} + +func (*HttpConnectionManager_RouteConfig) isHttpConnectionManager_RouteSpecifier() {} + +func (*HttpConnectionManager_ScopedRoutes) isHttpConnectionManager_RouteSpecifier() {} + +type isHttpConnectionManager_StripPortMode interface { + isHttpConnectionManager_StripPortMode() +} + +type HttpConnectionManager_StripAnyHostPort struct { + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. + // This affects the upstream host header unless the method is CONNECT in + // which case if no filter adds a port the original port will be restored before headers are sent upstream. + // Without setting this option, incoming requests with host “example:443“ will not match against + // route with :ref:`domains` match set to “example“. 
Defaults to “false“. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + // Only one of “strip_matching_host_port“ or “strip_any_host_port“ can be set. + StripAnyHostPort bool `protobuf:"varint,42,opt,name=strip_any_host_port,json=stripAnyHostPort,proto3,oneof"` +} + +func (*HttpConnectionManager_StripAnyHostPort) isHttpConnectionManager_StripPortMode() {} + +// The configuration to customize local reply returned by Envoy. +type LocalReplyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + Mappers []*ResponseMapper `protobuf:"bytes,1,rep,name=mappers,proto3" json:"mappers,omitempty"` + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: "plain/text" “body_format“. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // The following response body in "plain/text" format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: text + // + // upstream connect error:503:path=/foo + // + // Example two: "application/json" “body_format“. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + BodyFormat *v3.SubstitutionFormatString `protobuf:"bytes,2,opt,name=body_format,json=bodyFormat,proto3" json:"body_format,omitempty"` +} + +func (x *LocalReplyConfig) Reset() { + *x = LocalReplyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalReplyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalReplyConfig) ProtoMessage() {} + +func (x *LocalReplyConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalReplyConfig.ProtoReflect.Descriptor instead. 
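
To mirror the yaml examples in the LocalReplyConfig comment above in Go, a hedged sketch of a LocalReplyConfig with one ResponseMapper follows; the mapper's Filter is left nil purely for brevity (a real configuration would normally set an AccessLogFilter to select which local replies it rewrites), and the text format string is taken from the documentation example:

package example

import (
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// localReplySketch overrides the status code and body of matched local
// replies and formats every local reply with the text format shown in the
// documentation example above.
func localReplySketch() *hcmv3.LocalReplyConfig {
	return &hcmv3.LocalReplyConfig{
		Mappers: []*hcmv3.ResponseMapper{{
			// Filter deliberately omitted in this sketch.
			StatusCode: wrapperspb.UInt32(503),
			Body: &corev3.DataSource{
				Specifier: &corev3.DataSource_InlineString{InlineString: "upstream connect error"},
			},
		}},
		BodyFormat: &corev3.SubstitutionFormatString{
			Format: &corev3.SubstitutionFormatString_TextFormatSource{
				TextFormatSource: &corev3.DataSource{
					Specifier: &corev3.DataSource_InlineString{
						InlineString: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n",
					},
				},
			},
		},
	}
}
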
+func (*LocalReplyConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{1} +} + +func (x *LocalReplyConfig) GetMappers() []*ResponseMapper { + if x != nil { + return x.Mappers + } + return nil +} + +func (x *LocalReplyConfig) GetBodyFormat() *v3.SubstitutionFormatString { + if x != nil { + return x.BodyFormat + } + return nil +} + +// The configuration to filter and change local response. +// [#next-free-field: 6] +type ResponseMapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Filter to determine if this mapper should apply. + Filter *v31.AccessLogFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The new response status code if specified. + StatusCode *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // The new local reply body text if specified. It will be used in the “%LOCAL_REPLY_BODY%“ + // command operator in the “body_format“. + Body *v3.DataSource `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` + // A per mapper “body_format“ to override the :ref:`body_format `. + // It will be used when this mapper is matched. + BodyFormatOverride *v3.SubstitutionFormatString `protobuf:"bytes,4,opt,name=body_format_override,json=bodyFormatOverride,proto3" json:"body_format_override,omitempty"` + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + HeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,5,rep,name=headers_to_add,json=headersToAdd,proto3" json:"headers_to_add,omitempty"` +} + +func (x *ResponseMapper) Reset() { + *x = ResponseMapper{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMapper) ProtoMessage() {} + +func (x *ResponseMapper) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMapper.ProtoReflect.Descriptor instead. 
+func (*ResponseMapper) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{2} +} + +func (x *ResponseMapper) GetFilter() *v31.AccessLogFilter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ResponseMapper) GetStatusCode() *wrapperspb.UInt32Value { + if x != nil { + return x.StatusCode + } + return nil +} + +func (x *ResponseMapper) GetBody() *v3.DataSource { + if x != nil { + return x.Body + } + return nil +} + +func (x *ResponseMapper) GetBodyFormatOverride() *v3.SubstitutionFormatString { + if x != nil { + return x.BodyFormatOverride + } + return nil +} + +func (x *ResponseMapper) GetHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.HeadersToAdd + } + return nil +} + +type Rds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for RDS. + ConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + RouteConfigName string `protobuf:"bytes,2,opt,name=route_config_name,json=routeConfigName,proto3" json:"route_config_name,omitempty"` +} + +func (x *Rds) Reset() { + *x = Rds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Rds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rds) ProtoMessage() {} + +func (x *Rds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Rds.ProtoReflect.Descriptor instead. +func (*Rds) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{3} +} + +func (x *Rds) GetConfigSource() *v3.ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +func (x *Rds) GetRouteConfigName() string { + if x != nil { + return x.RouteConfigName + } + return "" +} + +// This message is used to work around the limitations with 'oneof' and repeated fields. 
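
The Rds message above carries only a ConfigSource and a route configuration name; it is wired into the connection manager through the RouteSpecifier oneof. A small sketch, assuming an ADS-based config source (illustrative, not part of this change):

package example

import (
	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
)

// rdsSketch selects the RDS branch of the route_specifier oneof and points it
// at a route configuration named "local_route" delivered over ADS.
func rdsSketch() *hcmv3.HttpConnectionManager {
	return &hcmv3.HttpConnectionManager{
		StatPrefix: "ingress_http",
		RouteSpecifier: &hcmv3.HttpConnectionManager_Rds{
			Rds: &hcmv3.Rds{
				RouteConfigName: "local_route",
				ConfigSource: &corev3.ConfigSource{
					ResourceApiVersion:    corev3.ApiVersion_V3,
					ConfigSourceSpecifier: &corev3.ConfigSource_Ads{Ads: &corev3.AggregatedConfigSource{}},
				},
			},
		},
	}
}
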
+type ScopedRouteConfigurationsList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScopedRouteConfigurations []*v32.ScopedRouteConfiguration `protobuf:"bytes,1,rep,name=scoped_route_configurations,json=scopedRouteConfigurations,proto3" json:"scoped_route_configurations,omitempty"` +} + +func (x *ScopedRouteConfigurationsList) Reset() { + *x = ScopedRouteConfigurationsList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfigurationsList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfigurationsList) ProtoMessage() {} + +func (x *ScopedRouteConfigurationsList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfigurationsList.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfigurationsList) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{4} +} + +func (x *ScopedRouteConfigurationsList) GetScopedRouteConfigurations() []*v32.ScopedRouteConfiguration { + if x != nil { + return x.ScopedRouteConfigurations + } + return nil +} + +// [#next-free-field: 6] +type ScopedRoutes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped routing configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder *ScopedRoutes_ScopeKeyBuilder `protobuf:"bytes,2,opt,name=scope_key_builder,json=scopeKeyBuilder,proto3" json:"scope_key_builder,omitempty"` + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. 
+ RdsConfigSource *v3.ConfigSource `protobuf:"bytes,3,opt,name=rds_config_source,json=rdsConfigSource,proto3" json:"rds_config_source,omitempty"` + // Types that are assignable to ConfigSpecifier: + // + // *ScopedRoutes_ScopedRouteConfigurationsList + // *ScopedRoutes_ScopedRds + ConfigSpecifier isScopedRoutes_ConfigSpecifier `protobuf_oneof:"config_specifier"` +} + +func (x *ScopedRoutes) Reset() { + *x = ScopedRoutes{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes) ProtoMessage() {} + +func (x *ScopedRoutes) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes.ProtoReflect.Descriptor instead. +func (*ScopedRoutes) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5} +} + +func (x *ScopedRoutes) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutes) GetScopeKeyBuilder() *ScopedRoutes_ScopeKeyBuilder { + if x != nil { + return x.ScopeKeyBuilder + } + return nil +} + +func (x *ScopedRoutes) GetRdsConfigSource() *v3.ConfigSource { + if x != nil { + return x.RdsConfigSource + } + return nil +} + +func (m *ScopedRoutes) GetConfigSpecifier() isScopedRoutes_ConfigSpecifier { + if m != nil { + return m.ConfigSpecifier + } + return nil +} + +func (x *ScopedRoutes) GetScopedRouteConfigurationsList() *ScopedRouteConfigurationsList { + if x, ok := x.GetConfigSpecifier().(*ScopedRoutes_ScopedRouteConfigurationsList); ok { + return x.ScopedRouteConfigurationsList + } + return nil +} + +func (x *ScopedRoutes) GetScopedRds() *ScopedRds { + if x, ok := x.GetConfigSpecifier().(*ScopedRoutes_ScopedRds); ok { + return x.ScopedRds + } + return nil +} + +type isScopedRoutes_ConfigSpecifier interface { + isScopedRoutes_ConfigSpecifier() +} + +type ScopedRoutes_ScopedRouteConfigurationsList struct { + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRouteConfigurationsList *ScopedRouteConfigurationsList `protobuf:"bytes,4,opt,name=scoped_route_configurations_list,json=scopedRouteConfigurationsList,proto3,oneof"` +} + +type ScopedRoutes_ScopedRds struct { + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRds *ScopedRds `protobuf:"bytes,5,opt,name=scoped_rds,json=scopedRds,proto3,oneof"` +} + +func (*ScopedRoutes_ScopedRouteConfigurationsList) isScopedRoutes_ConfigSpecifier() {} + +func (*ScopedRoutes_ScopedRds) isScopedRoutes_ConfigSpecifier() {} + +type ScopedRds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for scoped RDS. + ScopedRdsConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=scoped_rds_config_source,json=scopedRdsConfigSource,proto3" json:"scoped_rds_config_source,omitempty"` + // xdstp:// resource locator for scoped RDS collection. + // [#not-implemented-hide:] + SrdsResourcesLocator string `protobuf:"bytes,2,opt,name=srds_resources_locator,json=srdsResourcesLocator,proto3" json:"srds_resources_locator,omitempty"` +} + +func (x *ScopedRds) Reset() { + *x = ScopedRds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRds) ProtoMessage() {} + +func (x *ScopedRds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRds.ProtoReflect.Descriptor instead. +func (*ScopedRds) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{6} +} + +func (x *ScopedRds) GetScopedRdsConfigSource() *v3.ConfigSource { + if x != nil { + return x.ScopedRdsConfigSource + } + return nil +} + +func (x *ScopedRds) GetSrdsResourcesLocator() string { + if x != nil { + return x.SrdsResourcesLocator + } + return "" +} + +// [#next-free-field: 8] +type HttpFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. It also serves as a resource name in ExtensionConfigDS. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *HttpFilter_TypedConfig + // *HttpFilter_ConfigDiscovery + ConfigType isHttpFilter_ConfigType `protobuf_oneof:"config_type"` + // If true, clients that do not support this filter may ignore the + // filter but otherwise accept the config. + // Otherwise, clients that do not support this filter must reject the config. + IsOptional bool `protobuf:"varint,6,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` + // If true, the filter is disabled by default and must be explicitly enabled by setting + // per filter configuration in the route configuration. + // See :ref:`route based filter chain ` + // for more details. + // + // Terminal filters (e.g. “envoy.filters.http.router“) cannot be marked as disabled. 
+ Disabled bool `protobuf:"varint,7,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (x *HttpFilter) Reset() { + *x = HttpFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpFilter) ProtoMessage() {} + +func (x *HttpFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpFilter.ProtoReflect.Descriptor instead. +func (*HttpFilter) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{7} +} + +func (x *HttpFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HttpFilter) GetConfigType() isHttpFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *HttpFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*HttpFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *HttpFilter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*HttpFilter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +func (x *HttpFilter) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +func (x *HttpFilter) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +type isHttpFilter_ConfigType interface { + isHttpFilter_ConfigType() +} + +type HttpFilter_TypedConfig struct { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + // + // To support configuring a :ref:`match tree `, use an + // :ref:`ExtensionWithMatcher ` + // with the desired HTTP filter. + // [#extension-category: envoy.filters.http] + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type HttpFilter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + // + // To support configuring a :ref:`match tree `, use an + // :ref:`ExtensionWithMatcher ` + // with the desired HTTP filter. This works for both the default filter configuration as well + // as for filters provided via the API. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*HttpFilter_TypedConfig) isHttpFilter_ConfigType() {} + +func (*HttpFilter_ConfigDiscovery) isHttpFilter_ConfigType() {} + +type RequestIDExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Request ID extension specific configuration. 
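
An HttpFilter entry typically uses the TypedConfig branch of the config_type oneof defined above; config_discovery is used instead when the filter configuration is delivered via ECDS. A sketch for the terminal router filter, assuming the go-control-plane router extension package:

package example

import (
	routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
	hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"google.golang.org/protobuf/types/known/anypb"
)

// routerFilterSketch builds the terminal router filter entry using the
// typed_config branch of the config_type oneof.
func routerFilterSketch() (*hcmv3.HttpFilter, error) {
	cfg, err := anypb.New(&routerv3.Router{})
	if err != nil {
		return nil, err
	}
	return &hcmv3.HttpFilter{
		Name:       "envoy.filters.http.router",
		ConfigType: &hcmv3.HttpFilter_TypedConfig{TypedConfig: cfg},
	}, nil
}
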
+ TypedConfig *anypb.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *RequestIDExtension) Reset() { + *x = RequestIDExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestIDExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestIDExtension) ProtoMessage() {} + +func (x *RequestIDExtension) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestIDExtension.ProtoReflect.Descriptor instead. +func (*RequestIDExtension) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{8} +} + +func (x *RequestIDExtension) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// [#protodoc-title: Envoy Mobile HTTP connection manager] +// HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] +type EnvoyMobileHttpConnectionManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The configuration for the underlying HttpConnectionManager which will be + // instantiated for Envoy mobile. + Config *HttpConnectionManager `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *EnvoyMobileHttpConnectionManager) Reset() { + *x = EnvoyMobileHttpConnectionManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnvoyMobileHttpConnectionManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnvoyMobileHttpConnectionManager) ProtoMessage() {} + +func (x *EnvoyMobileHttpConnectionManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnvoyMobileHttpConnectionManager.ProtoReflect.Descriptor instead. 
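
The RequestIDExtension above wraps an opaque Any. The yaml example given earlier (UuidRequestIdConfig with pack_trace_reason: false) translates to Go roughly as follows, assuming the envoy/extensions/request_id/uuid/v3 package from go-control-plane:

package example

import (
	hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	uuidv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/request_id/uuid/v3"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// requestIDSketch disables trace-reason packing in the default UUID request
// ID extension, matching the documentation example above.
func requestIDSketch() (*hcmv3.RequestIDExtension, error) {
	cfg, err := anypb.New(&uuidv3.UuidRequestIdConfig{
		PackTraceReason: wrapperspb.Bool(false),
	})
	if err != nil {
		return nil, err
	}
	return &hcmv3.RequestIDExtension{TypedConfig: cfg}, nil
}
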
+func (*EnvoyMobileHttpConnectionManager) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{9} +} + +func (x *EnvoyMobileHttpConnectionManager) GetConfig() *HttpConnectionManager { + if x != nil { + return x.Config + } + return nil +} + +// [#next-free-field: 11] +type HttpConnectionManager_Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + ClientSampling *v33.Percent `protobuf:"bytes,3,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + RandomSampling *v33.Percent `protobuf:"bytes,4,opt,name=random_sampling,json=randomSampling,proto3" json:"random_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + OverallSampling *v33.Percent `protobuf:"bytes,5,opt,name=overall_sampling,json=overallSampling,proto3" json:"overall_sampling,omitempty"` + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + Verbose bool `protobuf:"varint,6,opt,name=verbose,proto3" json:"verbose,omitempty"` + // Maximum length of the request path to extract and include in the HttpUrl tag. Used to + // truncate lengthy request paths to meet the needs of a tracing backend. + // Default: 256 + MaxPathTagLength *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_path_tag_length,json=maxPathTagLength,proto3" json:"max_path_tag_length,omitempty"` + // A list of custom tags with unique tag name to create tags for the active span. + CustomTags []*v34.CustomTag `protobuf:"bytes,8,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty"` + // Configuration for an external tracing provider. + // If not specified, no tracing will be performed. + // + // .. attention:: + // + // Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once + // in Envoy lifetime. + // Any attempts to reconfigure it or to use different configurations for different HCM filters + // will be rejected. + // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + // on OpenCensus side. 
+ Provider *v35.Tracing_Http `protobuf:"bytes,9,opt,name=provider,proto3" json:"provider,omitempty"` + // Create separate tracing span for each upstream request if true. And if this flag is set to true, + // the tracing provider will assume that Envoy will be independent hop in the trace chain and may + // set span type to client or server based on this flag. + // This will deprecate the + // :ref:`start_child_span ` + // in the router. + // + // Users should set appropriate value based on their tracing provider and actual scenario: + // + // - If Envoy is used as sidecar and users want to make the sidecar and its application as only one + // hop in the trace chain, this flag should be set to false. And please also make sure the + // :ref:`start_child_span ` + // in the router is not set to true. + // - If Envoy is used as gateway or independent proxy, or users want to make the sidecar and its + // application as different hops in the trace chain, this flag should be set to true. + // - If tracing provider that has explicit requirements on span creation (like SkyWalking), + // this flag should be set to true. + // + // The default value is false for now for backward compatibility. + SpawnUpstreamSpan *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=spawn_upstream_span,json=spawnUpstreamSpan,proto3" json:"spawn_upstream_span,omitempty"` +} + +func (x *HttpConnectionManager_Tracing) Reset() { + *x = HttpConnectionManager_Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_Tracing) ProtoMessage() {} + +func (x *HttpConnectionManager_Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_Tracing.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_Tracing) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *HttpConnectionManager_Tracing) GetClientSampling() *v33.Percent { + if x != nil { + return x.ClientSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetRandomSampling() *v33.Percent { + if x != nil { + return x.RandomSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetOverallSampling() *v33.Percent { + if x != nil { + return x.OverallSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +func (x *HttpConnectionManager_Tracing) GetMaxPathTagLength() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxPathTagLength + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetCustomTags() []*v34.CustomTag { + if x != nil { + return x.CustomTags + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetProvider() *v35.Tracing_Http { + if x != nil { + return x.Provider + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetSpawnUpstreamSpan() *wrapperspb.BoolValue { + if x != nil { + return x.SpawnUpstreamSpan + } + return nil +} + +type HttpConnectionManager_InternalAddressConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether unix socket addresses should be considered internal. + UnixSockets bool `protobuf:"varint,1,opt,name=unix_sockets,json=unixSockets,proto3" json:"unix_sockets,omitempty"` + // List of CIDR ranges that are treated as internal. If unset, then RFC1918 / RFC4193 + // IP addresses will be considered internal. + CidrRanges []*v3.CidrRange `protobuf:"bytes,2,rep,name=cidr_ranges,json=cidrRanges,proto3" json:"cidr_ranges,omitempty"` +} + +func (x *HttpConnectionManager_InternalAddressConfig) Reset() { + *x = HttpConnectionManager_InternalAddressConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_InternalAddressConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_InternalAddressConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_InternalAddressConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_InternalAddressConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_InternalAddressConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *HttpConnectionManager_InternalAddressConfig) GetUnixSockets() bool { + if x != nil { + return x.UnixSockets + } + return false +} + +func (x *HttpConnectionManager_InternalAddressConfig) GetCidrRanges() []*v3.CidrRange { + if x != nil { + return x.CidrRanges + } + return nil +} + +// [#next-free-field: 7] +type HttpConnectionManager_SetCurrentClientCertDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether to forward the subject of the client cert. Defaults to false. + Subject *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + Cert bool `protobuf:"varint,3,opt,name=cert,proto3" json:"cert,omitempty"` + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + Chain bool `protobuf:"varint,6,opt,name=chain,proto3" json:"chain,omitempty"` + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + Dns bool `protobuf:"varint,4,opt,name=dns,proto3" json:"dns,omitempty"` + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + Uri bool `protobuf:"varint,5,opt,name=uri,proto3" json:"uri,omitempty"` +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) Reset() { + *x = HttpConnectionManager_SetCurrentClientCertDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_SetCurrentClientCertDetails) ProtoMessage() {} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_SetCurrentClientCertDetails.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_SetCurrentClientCertDetails) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetSubject() *wrapperspb.BoolValue { + if x != nil { + return x.Subject + } + return nil +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetCert() bool { + if x != nil { + return x.Cert + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetChain() bool { + if x != nil { + return x.Chain + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetDns() bool { + if x != nil { + return x.Dns + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetUri() bool { + if x != nil { + return x.Uri + } + return false +} + +// The configuration for HTTP upgrades. +// For each upgrade type desired, an UpgradeConfig must be added. +// +// .. warning:: +// +// The current implementation of upgrade headers does not handle +// multi-valued upgrade headers. Support for multi-valued headers may be +// added in the future if needed. +// +// .. warning:: +// +// The current implementation of upgrade headers does not work with HTTP/2 +// upstreams. +type HttpConnectionManager_UpgradeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. + UpgradeType string `protobuf:"bytes,1,opt,name=upgrade_type,json=upgradeType,proto3" json:"upgrade_type,omitempty"` + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + Filters []*HttpFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *HttpConnectionManager_UpgradeConfig) Reset() { + *x = HttpConnectionManager_UpgradeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_UpgradeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_UpgradeConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_UpgradeConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_UpgradeConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_UpgradeConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *HttpConnectionManager_UpgradeConfig) GetUpgradeType() string { + if x != nil { + return x.UpgradeType + } + return "" +} + +func (x *HttpConnectionManager_UpgradeConfig) GetFilters() []*HttpFilter { + if x != nil { + return x.Filters + } + return nil +} + +func (x *HttpConnectionManager_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +// [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied +// before any processing of requests by HTTP filters, routing, and matching. Only the normalized +// path will be visible internally if a transformation is enabled. Any path rewrites that the +// router performs (e.g. :ref:`regex_rewrite +// ` or :ref:`prefix_rewrite +// `) will apply to the “:path“ header +// destined for the upstream. +// +// Note: access logging and tracing will show the original “:path“ header. +type HttpConnectionManager_PathNormalizationOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] Normalization applies internally before any processing of requests by + // HTTP filters, routing, and matching *and* will affect the forwarded “:path“ header. Defaults + // to :ref:`NormalizePathRFC3986 + // `. When not + // specified, this value may be overridden by the runtime variable + // :ref:`http_connection_manager.normalize_path`. + // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + // normalization due to disallowed characters.) + ForwardingTransformation *v36.PathTransformation `protobuf:"bytes,1,opt,name=forwarding_transformation,json=forwardingTransformation,proto3" json:"forwarding_transformation,omitempty"` + // [#not-implemented-hide:] Normalization only applies internally before any processing of + // requests by HTTP filters, routing, and matching. These will be applied after full + // transformation is applied. The “:path“ header before this transformation will be restored in + // the router filter and sent upstream unless it was mutated by a filter. Defaults to no + // transformations. + // Multiple actions can be applied in the same Transformation, forming a sequential + // pipeline. The transformations will be performed in the order that they appear. Envoy will + // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + // normalization due to disallowed characters.) 
+ HttpFilterTransformation *v36.PathTransformation `protobuf:"bytes,2,opt,name=http_filter_transformation,json=httpFilterTransformation,proto3" json:"http_filter_transformation,omitempty"` +} + +func (x *HttpConnectionManager_PathNormalizationOptions) Reset() { + *x = HttpConnectionManager_PathNormalizationOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_PathNormalizationOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_PathNormalizationOptions) ProtoMessage() {} + +func (x *HttpConnectionManager_PathNormalizationOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_PathNormalizationOptions.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager_PathNormalizationOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *HttpConnectionManager_PathNormalizationOptions) GetForwardingTransformation() *v36.PathTransformation { + if x != nil { + return x.ForwardingTransformation + } + return nil +} + +func (x *HttpConnectionManager_PathNormalizationOptions) GetHttpFilterTransformation() *v36.PathTransformation { + if x != nil { + return x.HttpFilterTransformation + } + return nil +} + +// Configures the manner in which the Proxy-Status HTTP response header is +// populated. +// +// See the [Proxy-Status +// RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-proxy-status-08). +// [#comment:TODO: Update this with the non-draft URL when finalized.] +// +// The Proxy-Status header is a string of the form: +// +// "; error=; details=
" +// +// [#next-free-field: 7] +type HttpConnectionManager_ProxyStatusConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the details field of the Proxy-Status header is not populated with stream_info.response_code_details. + // This value defaults to “false“, i.e. the “details“ field is populated by default. + RemoveDetails bool `protobuf:"varint,1,opt,name=remove_details,json=removeDetails,proto3" json:"remove_details,omitempty"` + // If true, the details field of the Proxy-Status header will not contain + // connection termination details. This value defaults to “false“, i.e. the + // “details“ field will contain connection termination details by default. + RemoveConnectionTerminationDetails bool `protobuf:"varint,2,opt,name=remove_connection_termination_details,json=removeConnectionTerminationDetails,proto3" json:"remove_connection_termination_details,omitempty"` + // If true, the details field of the Proxy-Status header will not contain an + // enumeration of the Envoy ResponseFlags. This value defaults to “false“, + // i.e. the “details“ field will contain a list of ResponseFlags by default. + RemoveResponseFlags bool `protobuf:"varint,3,opt,name=remove_response_flags,json=removeResponseFlags,proto3" json:"remove_response_flags,omitempty"` + // If true, overwrites the existing Status header with the response code + // recommended by the Proxy-Status spec. + // This value defaults to “false“, i.e. the HTTP response code is not + // overwritten. + SetRecommendedResponseCode bool `protobuf:"varint,4,opt,name=set_recommended_response_code,json=setRecommendedResponseCode,proto3" json:"set_recommended_response_code,omitempty"` + // The name of the proxy as it appears at the start of the Proxy-Status + // header. + // + // If neither of these values are set, this value defaults to “server_name“, + // which itself defaults to "envoy". + // + // Types that are assignable to ProxyName: + // + // *HttpConnectionManager_ProxyStatusConfig_UseNodeId + // *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName + ProxyName isHttpConnectionManager_ProxyStatusConfig_ProxyName `protobuf_oneof:"proxy_name"` +} + +func (x *HttpConnectionManager_ProxyStatusConfig) Reset() { + *x = HttpConnectionManager_ProxyStatusConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_ProxyStatusConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_ProxyStatusConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_ProxyStatusConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_ProxyStatusConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_ProxyStatusConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveDetails() bool { + if x != nil { + return x.RemoveDetails + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveConnectionTerminationDetails() bool { + if x != nil { + return x.RemoveConnectionTerminationDetails + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveResponseFlags() bool { + if x != nil { + return x.RemoveResponseFlags + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetSetRecommendedResponseCode() bool { + if x != nil { + return x.SetRecommendedResponseCode + } + return false +} + +func (m *HttpConnectionManager_ProxyStatusConfig) GetProxyName() isHttpConnectionManager_ProxyStatusConfig_ProxyName { + if m != nil { + return m.ProxyName + } + return nil +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetUseNodeId() bool { + if x, ok := x.GetProxyName().(*HttpConnectionManager_ProxyStatusConfig_UseNodeId); ok { + return x.UseNodeId + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetLiteralProxyName() string { + if x, ok := x.GetProxyName().(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName); ok { + return x.LiteralProxyName + } + return "" +} + +type isHttpConnectionManager_ProxyStatusConfig_ProxyName interface { + isHttpConnectionManager_ProxyStatusConfig_ProxyName() +} + +type HttpConnectionManager_ProxyStatusConfig_UseNodeId struct { + // If “use_node_id“ is set, Proxy-Status headers will use the Envoy's node + // ID as the name of the proxy. + UseNodeId bool `protobuf:"varint,5,opt,name=use_node_id,json=useNodeId,proto3,oneof"` +} + +type HttpConnectionManager_ProxyStatusConfig_LiteralProxyName struct { + // If “literal_proxy_name“ is set, Proxy-Status headers will use this + // value as the name of the proxy. + LiteralProxyName string `protobuf:"bytes,6,opt,name=literal_proxy_name,json=literalProxyName,proto3,oneof"` +} + +func (*HttpConnectionManager_ProxyStatusConfig_UseNodeId) isHttpConnectionManager_ProxyStatusConfig_ProxyName() { +} + +func (*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) isHttpConnectionManager_ProxyStatusConfig_ProxyName() { +} + +type HttpConnectionManager_HcmAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The interval to flush the above access logs. By default, the HCM will flush exactly one access log + // on stream close, when the HTTP request is complete. If this field is set, the HCM will flush access + // logs periodically at the specified interval. This is especially useful in the case of long-lived + // requests, such as CONNECT and Websockets. Final access logs can be detected via the + // “requestComplete()“ method of “StreamInfo“ in access log filters, or through the “%DURATION%“ substitution + // string. + // The interval must be at least 1 millisecond. + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log when a new HTTP request is received, after request + // headers have been evaluated, before iterating through the HTTP filter chain. 
+ // This log record, if enabled, does not depend on periodic log records or request completion log. + // Details related to upstream cluster, such as upstream host, will not be available for this log. + FlushAccessLogOnNewRequest bool `protobuf:"varint,2,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // If true, the HCM will flush an access log when a tunnel is successfully established. For example, + // this could be when an upstream has successfully returned 101 Switching Protocols, or when the proxy + // has returned 200 to a CONNECT request. + FlushLogOnTunnelSuccessfullyEstablished bool `protobuf:"varint,3,opt,name=flush_log_on_tunnel_successfully_established,json=flushLogOnTunnelSuccessfullyEstablished,proto3" json:"flush_log_on_tunnel_successfully_established,omitempty"` +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) Reset() { + *x = HttpConnectionManager_HcmAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_HcmAccessLogOptions) ProtoMessage() {} + +func (x *HttpConnectionManager_HcmAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_HcmAccessLogOptions.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager_HcmAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetAccessLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushLogOnTunnelSuccessfullyEstablished() bool { + if x != nil { + return x.FlushLogOnTunnelSuccessfullyEstablished + } + return false +} + +// Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These +// keys are matched against a set of :ref:`Key` +// objects assembled from :ref:`ScopedRouteConfiguration` +// messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via +// :ref:`scoped_route_configurations_list`. +// +// Upon receiving a request's headers, the Router will build a key using the algorithm specified +// by this message. This key will be used to look up the routing table (i.e., the +// :ref:`RouteConfiguration`) to use for the request. 
+type ScopedRoutes_ScopeKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the + // fragments of a :ref:`ScopedRouteConfiguration`. + // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. + Fragments []*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder `protobuf:"bytes,1,rep,name=fragments,proto3" json:"fragments,omitempty"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder.ProtoReflect.Descriptor instead. +func (*ScopedRoutes_ScopeKeyBuilder) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder) GetFragments() []*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder { + if x != nil { + return x.Fragments + } + return nil +} + +// Specifies the mechanism for constructing key fragments which are composed into scope keys. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ + Type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type `protobuf_oneof:"type"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) GetType() isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) GetHeaderValueExtractor() *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor { + if x, ok := x.GetType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_); ok { + return x.HeaderValueExtractor + } + return nil +} + +type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type interface { + isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type() +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ struct { + // Specifies how a header field's value should be extracted. + HeaderValueExtractor *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor `protobuf:"bytes,1,opt,name=header_value_extractor,json=headerValueExtractor,proto3,oneof"` +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type() { +} + +// Specifies how the value of a header should be extracted. +// The following example maps the structure of a header to the fields in this message. +// +// .. code:: +// +// <0> <1> <-- index +// X-Header: a=b;c=d +// | || | +// | || \----> +// | || +// | |\----> +// | | +// | \----> +// | +// \----> +// +// Each 'a=b' key-value pair constitutes an 'element' of the header field. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the header field to extract the value from. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. 
+ ElementSeparator string `protobuf:"bytes,2,opt,name=element_separator,json=elementSeparator,proto3" json:"element_separator,omitempty"` + // Types that are assignable to ExtractType: + // + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element + ExtractType isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType `protobuf_oneof:"extract_type"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.ProtoReflect.Descriptor instead. +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetElementSeparator() string { + if x != nil { + return x.ElementSeparator + } + return "" +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetExtractType() isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType { + if m != nil { + return m.ExtractType + } + return nil +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetIndex() uint32 { + if x, ok := x.GetExtractType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index); ok { + return x.Index + } + return 0 +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetElement() *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement { + if x, ok := x.GetExtractType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element); ok { + return x.Element + } + return nil +} + +type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType interface { + isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index struct { + // Specifies the zero based index of the element to extract. + // Note Envoy concatenates multiple values of the same header key into a comma separated + // string, the splitting always happens after the concatenation. 
+ Index uint32 `protobuf:"varint,3,opt,name=index,proto3,oneof"` +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element struct { + // Specifies the key value pair to extract the value from. + Element *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement `protobuf:"bytes,4,opt,name=element,proto3,oneof"` +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() { +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() { +} + +// Specifies a header field's key value pair to match on. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The separator between key and value (e.g., '=' separates 'k=v;...'). + // If an element is an empty string, the element is ignored. + // If an element contains no separator, the whole element is parsed as key and the + // fragment value is an empty string. + // If there are multiple values for a matched key, the first value is returned. + Separator string `protobuf:"bytes,1,opt,name=separator,proto3" json:"separator,omitempty"` + // The key to match on. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0, 0, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) GetSeparator() string { + if x != nil { + return x.Separator + } + return "" +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +var File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc = []byte{ + 0x0a, 0x59, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3b, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, + 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 
0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x42, 0x0a, 0x15, 0x48, 0x74, + 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x12, 0x85, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 
0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, + 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x54, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x64, 0x73, 0x48, 0x00, 0x52, 0x03, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0c, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x0d, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, + 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x6a, 0x0a, + 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x68, 0x74, + 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x64, 0x64, + 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x61, + 0x64, 0x64, 0x55, 0x73, 
0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x74, 0x0a, 0x07, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x12, 0x73, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x19, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x22, 0x68, 0x74, 0x74, 0x70, 0x31, 0x5f, + 0x73, 0x61, 0x66, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x3a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1e, 0x68, 0x74, 0x74, 0x70, 0x31, 0x53, 0x61, 0x66, 0x65, 0x4d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x15, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x68, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, + 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, + 0x16, 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 
0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x2c, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x02, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0xb9, 0x01, + 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x1a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1c, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x2a, 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, 0x12, 0x52, 0x0a, 0x13, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x11, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 
0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x62, 0x0a, + 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x6d, 0x0a, 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x17, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x16, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x37, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1a, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x4e, 0x65, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, + 
0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x38, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x66, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x48, 0x63, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x51, 0x0a, + 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x10, + 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x70, + 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x70, + 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x65, 0x61, + 0x72, 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x17, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x68, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 
0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x58, 0x66, 0x66, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, 0x61, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, + 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x1b, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x1b, 0x73, 0x65, + 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x31, 0x30, 0x30, 0x43, + 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, 0x65, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x70, 0x76, 0x34, + 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x76, + 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x41, 0x73, + 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, 0x76, 0x36, 0x12, 0x89, + 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x6f, + 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x1e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, + 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x77, 0x69, 0x74, 0x68, + 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, 
0x73, 0x68, 0x65, 0x73, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, + 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, + 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x81, 0x01, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, + 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, 0x70, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, + 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, + 
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x1a, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x18, 0x70, + 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x69, 0x70, + 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x64, + 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, 0x72, 0x69, 0x70, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, 0x6f, 0x74, 0x12, 0x94, + 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x64, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x5f, 0x78, 0x5f, 0x66, 
0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x39, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x68, 0x0a, 0x23, 0x61, 0x64, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x61, 0x64, 0x64, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xc2, 0x05, 0x0a, 0x07, + 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x10, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x67, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x3f, 
0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x11, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, + 0x70, 0x61, 0x6e, 0x22, 0x28, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x3a, 0x5b, 0x9a, + 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x74, 0x61, 0x67, 0x73, + 0x1a, 0xe7, 0x01, 0x0a, 0x15, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x3a, + 0x69, 0x9a, 0xc5, 0x88, 0x1e, 0x64, 0x0a, 0x62, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x98, 0x02, 0x0a, 0x1b, 0x53, + 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 
0x65, 0x6e, 0x74, 0x43, + 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x63, 0x65, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75, 0x72, 0x69, 0x3a, 0x6f, + 0x9a, 0xc5, 0x88, 0x1e, 0x6a, 0x0a, 0x68, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0xae, 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x61, 0x0a, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x34, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x3a, 0x61, 0x9a, 0xc5, 0x88, 0x1e, 0x5c, 0x0a, 0x5a, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe5, 0x01, 0x0a, 0x18, 0x50, 0x61, 0x74, 0x68, 0x4e, + 
0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x19, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe4, + 0x02, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x51, 0x0a, 0x25, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x73, 0x65, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x75, 0x73, + 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x6c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x1a, 0x9d, 0x02, 0x0a, 0x13, 0x48, 0x63, 0x6d, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x43, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x4e, 0x65, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x2c, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x65, 0x73, 0x74, 0x61, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x27, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x22, 0x36, 0x0a, 0x09, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, + 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x03, 0x22, 0x53, 0x0a, + 0x1a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x0a, 0x09, 0x4f, + 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x50, + 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, + 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, + 0x10, 0x02, 0x22, 0x79, 0x0a, 0x18, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x0c, + 0x0a, 0x08, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x12, + 0x0a, 0x0e, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x5f, 0x53, + 0x45, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, + 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x22, 0xa0, 0x01, + 0x0a, 0x1c, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x1f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, + 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x4b, 0x45, 0x45, 0x50, 0x5f, 0x55, 0x4e, 0x43, 0x48, + 0x41, 0x4e, 0x47, 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, + 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, + 0x4e, 0x45, 0x53, 0x43, 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x44, 0x49, + 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x4e, 0x45, 0x53, 0x43, 0x41, + 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x04, + 0x3a, 0x53, 0x9a, 0xc5, 0x88, 0x1e, 0x4e, 0x0a, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x11, 0x0a, + 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xca, 0x01, 0x0a, 0x10, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x62, 0x6f, 0x64, + 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9c, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 
0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, 0x08, + 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x60, 0x0a, 0x14, 0x62, 0x6f, + 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x12, 0x62, 0x6f, 0x64, 0x79, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x58, 0x0a, 0x0e, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x03, 0x52, 0x64, 0x73, 0x12, 0x51, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x41, 0x9a, + 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x64, 0x73, + 0x22, 0xf7, 0x01, 0x0a, 0x1d, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x79, 0x0a, 0x1b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 
0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x19, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x5b, 0x9a, + 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xe5, 0x0e, 0x0a, 0x0c, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x11, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x59, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x11, 0x72, 0x64, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x20, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 
0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x1d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x67, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x48, 0x00, + 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x1a, 0xdf, 0x09, 0x0a, 0x0f, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, + 0x91, 0x01, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x69, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, + 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x1a, 0xdb, 0x07, 0x0a, 0x0f, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0xb6, 0x01, 0x0a, 0x16, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x7e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x14, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x1a, 0x95, 0x05, 0x0a, 0x14, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0xa5, 0x01, 0x0a, 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x88, 0x01, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x00, + 0x52, 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0xdb, 0x01, 0x0a, 0x09, 0x4b, 0x76, + 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x19, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x8b, 0x01, 0x9a, 0xc5, 0x88, 0x1e, + 0x85, 0x01, 0x0a, 0x82, 0x01, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, + 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, + 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0x9a, 0xc5, 0x88, 0x1e, 0x7a, 0x0a, 0x78, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 
0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x9a, 0xc5, 0x88, 0x1e, 0x65, 0x0a, + 0x63, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x3a, 0x5a, 0x9a, 0xc5, 0x88, 0x1e, 0x55, 0x0a, 0x53, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x3a, 0x4a, 0x9a, + 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x17, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0xf1, 0x01, 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, + 0x12, 0x65, 0x0a, 0x18, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x15, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x72, 0x64, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x3a, 0x47, 0x9a, + 0xc5, 0x88, 0x1e, 0x42, 0x0a, 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 
0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x22, 0xe8, 0x02, 0x0a, 0x0a, 0x48, 0x74, 0x74, 0x70, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, + 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, + 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x48, 0x9a, 0xc5, 0x88, 0x1e, 0x43, 0x0a, 0x41, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, + 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x9f, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x3a, 0x50, 0x9a, 0xc5, 0x88, 0x1e, 0x4b, 0x0a, 0x49, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x8e, 0x01, 0x0a, 0x20, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x6f, 0x62, + 0x69, 0x6c, 0x65, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 
0x61, 0x67, 0x65, 0x72, 0x12, 0x6a, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0xef, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x49, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x1a, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x7c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData = file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc +) + +func file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData) + }) + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData +} + +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var 
file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes = []interface{}{ + (HttpConnectionManager_CodecType)(0), // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType + (HttpConnectionManager_ServerHeaderTransformation)(0), // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation + (HttpConnectionManager_ForwardClientCertDetails)(0), // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails + (HttpConnectionManager_PathWithEscapedSlashesAction)(0), // 3: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction + (HttpConnectionManager_Tracing_OperationName)(0), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.OperationName + (*HttpConnectionManager)(nil), // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + (*LocalReplyConfig)(nil), // 6: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig + (*ResponseMapper)(nil), // 7: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper + (*Rds)(nil), // 8: envoy.extensions.filters.network.http_connection_manager.v3.Rds + (*ScopedRouteConfigurationsList)(nil), // 9: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList + (*ScopedRoutes)(nil), // 10: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes + (*ScopedRds)(nil), // 11: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds + (*HttpFilter)(nil), // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + (*RequestIDExtension)(nil), // 13: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + (*EnvoyMobileHttpConnectionManager)(nil), // 14: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager + (*HttpConnectionManager_Tracing)(nil), // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing + (*HttpConnectionManager_InternalAddressConfig)(nil), // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig + (*HttpConnectionManager_SetCurrentClientCertDetails)(nil), // 17: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails + (*HttpConnectionManager_UpgradeConfig)(nil), // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig + (*HttpConnectionManager_PathNormalizationOptions)(nil), // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions + (*HttpConnectionManager_ProxyStatusConfig)(nil), // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig + (*HttpConnectionManager_HcmAccessLogOptions)(nil), // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + (*ScopedRoutes_ScopeKeyBuilder)(nil), // 22: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)(nil), // 23: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + 
(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor)(nil), // 24: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement)(nil), // 25: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + (*v32.RouteConfiguration)(nil), // 26: envoy.config.route.v3.RouteConfiguration + (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue + (*v3.HttpProtocolOptions)(nil), // 28: envoy.config.core.v3.HttpProtocolOptions + (*v3.Http1ProtocolOptions)(nil), // 29: envoy.config.core.v3.Http1ProtocolOptions + (*v3.Http2ProtocolOptions)(nil), // 30: envoy.config.core.v3.Http2ProtocolOptions + (*v3.Http3ProtocolOptions)(nil), // 31: envoy.config.core.v3.Http3ProtocolOptions + (*v3.SchemeHeaderTransformation)(nil), // 32: envoy.config.core.v3.SchemeHeaderTransformation + (*wrapperspb.UInt32Value)(nil), // 33: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 34: google.protobuf.Duration + (*v31.AccessLog)(nil), // 35: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 36: envoy.config.core.v3.TypedExtensionConfig + (*v3.SubstitutionFormatString)(nil), // 37: envoy.config.core.v3.SubstitutionFormatString + (*v31.AccessLogFilter)(nil), // 38: envoy.config.accesslog.v3.AccessLogFilter + (*v3.DataSource)(nil), // 39: envoy.config.core.v3.DataSource + (*v3.HeaderValueOption)(nil), // 40: envoy.config.core.v3.HeaderValueOption + (*v3.ConfigSource)(nil), // 41: envoy.config.core.v3.ConfigSource + (*v32.ScopedRouteConfiguration)(nil), // 42: envoy.config.route.v3.ScopedRouteConfiguration + (*anypb.Any)(nil), // 43: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 44: envoy.config.core.v3.ExtensionConfigSource + (*v33.Percent)(nil), // 45: envoy.type.v3.Percent + (*v34.CustomTag)(nil), // 46: envoy.type.tracing.v3.CustomTag + (*v35.Tracing_Http)(nil), // 47: envoy.config.trace.v3.Tracing.Http + (*v3.CidrRange)(nil), // 48: envoy.config.core.v3.CidrRange + (*v36.PathTransformation)(nil), // 49: envoy.type.http.v3.PathTransformation +} +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.codec_type:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType + 8, // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.Rds + 26, // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config:type_name -> envoy.config.route.v3.RouteConfiguration + 10, // 3: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scoped_routes:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes + 12, // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 27, // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent:type_name -> google.protobuf.BoolValue + 15, // 6: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.tracing:type_name -> 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing + 28, // 7: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 29, // 8: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 30, // 9: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 31, // 10: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http3_protocol_options:type_name -> envoy.config.core.v3.Http3ProtocolOptions + 1, // 11: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.server_header_transformation:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation + 32, // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scheme_header_transformation:type_name -> envoy.config.core.v3.SchemeHeaderTransformation + 33, // 13: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.max_request_headers_kb:type_name -> google.protobuf.UInt32Value + 34, // 14: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout:type_name -> google.protobuf.Duration + 34, // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout:type_name -> google.protobuf.Duration + 34, // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_headers_timeout:type_name -> google.protobuf.Duration + 34, // 17: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout:type_name -> google.protobuf.Duration + 34, // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.delayed_close_timeout:type_name -> google.protobuf.Duration + 35, // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 34, // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_flush_interval:type_name -> google.protobuf.Duration + 21, // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + 27, // 22: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address:type_name -> google.protobuf.BoolValue + 36, // 23: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.original_ip_detection_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 36, // 24: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.early_header_mutation_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 16, // 25: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.internal_address_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig + 27, // 26: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.generate_request_id:type_name -> google.protobuf.BoolValue + 2, // 27: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails + 17, // 28: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.set_current_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails + 18, // 29: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig + 27, // 30: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.normalize_path:type_name -> google.protobuf.BoolValue + 3, // 31: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_with_escaped_slashes_action:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction + 13, // 32: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_id_extension:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + 6, // 33: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.local_reply_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig + 27, // 34: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 19, // 35: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_normalization_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions + 20, // 36: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.proxy_status_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig + 36, // 37: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.typed_header_validation_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 27, // 38: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_proxy_protocol_connection_state:type_name -> google.protobuf.BoolValue + 7, // 39: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.mappers:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper + 37, // 40: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format:type_name -> envoy.config.core.v3.SubstitutionFormatString + 38, // 41: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 33, // 42: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.status_code:type_name -> google.protobuf.UInt32Value + 39, // 43: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body:type_name -> envoy.config.core.v3.DataSource + 37, // 44: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override:type_name -> 
envoy.config.core.v3.SubstitutionFormatString + 40, // 45: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 41, // 46: envoy.extensions.filters.network.http_connection_manager.v3.Rds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 42, // 47: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList.scoped_route_configurations:type_name -> envoy.config.route.v3.ScopedRouteConfiguration + 22, // 48: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + 41, // 49: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 9, // 50: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList + 11, // 51: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds + 41, // 52: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds.scoped_rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 43, // 53: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.typed_config:type_name -> google.protobuf.Any + 44, // 54: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 43, // 55: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension.typed_config:type_name -> google.protobuf.Any + 5, // 56: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager.config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + 45, // 57: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.client_sampling:type_name -> envoy.type.v3.Percent + 45, // 58: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.random_sampling:type_name -> envoy.type.v3.Percent + 45, // 59: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.overall_sampling:type_name -> envoy.type.v3.Percent + 33, // 60: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.max_path_tag_length:type_name -> google.protobuf.UInt32Value + 46, // 61: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 47, // 62: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider:type_name -> envoy.config.trace.v3.Tracing.Http + 27, // 63: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.spawn_upstream_span:type_name -> google.protobuf.BoolValue + 48, // 64: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig.cidr_ranges:type_name -> envoy.config.core.v3.CidrRange + 27, // 65: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails.subject:type_name -> google.protobuf.BoolValue + 12, // 66: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 27, // 67: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 49, // 68: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.forwarding_transformation:type_name -> envoy.type.http.v3.PathTransformation + 49, // 69: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.http_filter_transformation:type_name -> envoy.type.http.v3.PathTransformation + 34, // 70: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions.access_log_flush_interval:type_name -> google.protobuf.Duration + 23, // 71: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.fragments:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + 24, // 72: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.header_value_extractor:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + 25, // 73: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.element:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + 74, // [74:74] is the sub-list for method output_type + 74, // [74:74] is the sub-list for method input_type + 74, // [74:74] is the sub-list for extension type_name + 74, // [74:74] is the sub-list for extension extendee + 0, // [0:74] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_init() +} +func file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_init() { + if File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalReplyConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseMapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*Rds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfigurationsList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestIDExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnvoyMobileHttpConnectionManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_InternalAddressConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_SetCurrentClientCertDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_UpgradeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_PathNormalizationOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_ProxyStatusConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_HcmAccessLogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HttpConnectionManager_Rds)(nil), + (*HttpConnectionManager_RouteConfig)(nil), + (*HttpConnectionManager_ScopedRoutes)(nil), + (*HttpConnectionManager_StripAnyHostPort)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopedRouteConfigurationsList)(nil), + (*ScopedRoutes_ScopedRds)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*HttpFilter_TypedConfig)(nil), + (*HttpFilter_ConfigDiscovery)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*HttpConnectionManager_ProxyStatusConfig_UseNodeId)(nil), + 
(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index)(nil), + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc, + NumEnums: 5, + NumMessages: 21, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs, + EnumInfos: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes, + MessageInfos: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto = out.File + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc = nil + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes = nil + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go new file mode 100644 index 000000000..094e7faaf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go @@ -0,0 +1,4812 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpConnectionManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpConnectionManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpConnectionManagerMultiError, or nil if none found. +func (m *HttpConnectionManager) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := HttpConnectionManager_CodecType_name[int32(m.GetCodecType())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "CodecType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetStatPrefix()) < 1 { + err := HttpConnectionManagerValidationError{ + field: "StatPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHttpFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetAddUserAgent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddUserAgent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Http1SafeMaxConnectionDuration + + if all { + switch v := interface{}(m.GetHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp3ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp3ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_HttpConnectionManager_ServerName_Pattern.MatchString(m.GetServerName()) { + err := HttpConnectionManagerValidationError{ + field: "ServerName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := HttpConnectionManager_ServerHeaderTransformation_name[int32(m.GetServerHeaderTransformation())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "ServerHeaderTransformation", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSchemeHeaderTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSchemeHeaderTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxRequestHeadersKb(); wrapper != nil { + + if val := wrapper.GetValue(); val <= 0 || val > 8192 { + err := HttpConnectionManagerValidationError{ + field: "MaxRequestHeadersKb", + reason: "value must be inside range (0, 8192]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetStreamIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequestTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetRequestHeadersTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManagerValidationError{ + field: "RequestHeadersTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManagerValidationError{ + field: "RequestHeadersTimeout", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetDrainTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDelayedCloseTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDelayedCloseTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManagerValidationError{ + field: 
"AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManagerValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + if all { + switch v := interface{}(m.GetAccessLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAccessLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for XffNumTrustedHops + + for idx, item := range m.GetOriginalIpDetectionExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetEarlyHeaderMutationExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInternalAddressConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalAddressConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SkipXffAppend + + if !_HttpConnectionManager_Via_Pattern.MatchString(m.GetVia()) { + err := HttpConnectionManagerValidationError{ + field: "Via", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGenerateRequestId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenerateRequestId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PreserveExternalRequestId + + // no validation rules for AlwaysSetRequestIdInResponse + + if _, ok := HttpConnectionManager_ForwardClientCertDetails_name[int32(m.GetForwardClientCertDetails())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "ForwardClientCertDetails", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSetCurrentClientCertDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSetCurrentClientCertDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Proxy_100Continue + + // no validation rules for RepresentIpv4RemoteAddressAsIpv4MappedIpv6 + + for idx, item := range m.GetUpgradeConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetNormalizePath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNormalizePath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for MergeSlashes + + // no validation rules for PathWithEscapedSlashesAction + + if all { + switch v := interface{}(m.GetRequestIdExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestIdExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalReplyConfig()).(type) { + case 
interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalReplyConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StripMatchingHostPort + + if all { + switch v := interface{}(m.GetStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPathNormalizationOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathNormalizationOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StripTrailingHostDot + + if all { + switch v := interface{}(m.GetProxyStatusConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyStatusConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if 
all { + switch v := interface{}(m.GetTypedHeaderValidationConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedHeaderValidationConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AppendXForwardedPort + + // no validation rules for AppendLocalOverload + + if all { + switch v := interface{}(m.GetAddProxyProtocolConnectionState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddProxyProtocolConnectionState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofRouteSpecifierPresent := false + switch v := m.RouteSpecifier.(type) { + case *HttpConnectionManager_Rds: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpConnectionManager_RouteConfig: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpConnectionManager_ScopedRoutes: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRoutes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRoutes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRouteSpecifierPresent { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + switch v := m.StripPortMode.(type) { + case *HttpConnectionManager_StripAnyHostPort: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "StripPortMode", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for StripAnyHostPort + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpConnectionManagerMultiError(errors) + } + + return nil +} + +// HttpConnectionManagerMultiError is an error wrapping multiple validation +// errors returned by HttpConnectionManager.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManagerMultiError) AllErrors() []error { return m } + +// HttpConnectionManagerValidationError is the validation error returned by +// HttpConnectionManager.Validate if the designated constraints aren't met. +type HttpConnectionManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HttpConnectionManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManagerValidationError) ErrorName() string { + return "HttpConnectionManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManagerValidationError{} + +var _HttpConnectionManager_ServerName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HttpConnectionManager_Via_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on LocalReplyConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LocalReplyConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalReplyConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalReplyConfigMultiError, or nil if none found. 
+func (m *LocalReplyConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalReplyConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetMappers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetBodyFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBodyFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return LocalReplyConfigMultiError(errors) + } + + return nil +} + +// LocalReplyConfigMultiError is an error wrapping multiple validation errors +// returned by LocalReplyConfig.ValidateAll() if the designated constraints +// aren't met. +type LocalReplyConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalReplyConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalReplyConfigMultiError) AllErrors() []error { return m } + +// LocalReplyConfigValidationError is the validation error returned by +// LocalReplyConfig.Validate if the designated constraints aren't met. +type LocalReplyConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalReplyConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalReplyConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalReplyConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalReplyConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LocalReplyConfigValidationError) ErrorName() string { return "LocalReplyConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LocalReplyConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalReplyConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalReplyConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalReplyConfigValidationError{} + +// Validate checks the field values on ResponseMapper with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResponseMapper) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseMapper with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResponseMapperMultiError, +// or nil if none found. +func (m *ResponseMapper) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseMapper) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetFilter() == nil { + err := ResponseMapperValidationError{ + field: "Filter", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetStatusCode(); wrapper != nil { + + if val := wrapper.GetValue(); val < 200 || val >= 600 { + err := ResponseMapperValidationError{ + field: "StatusCode", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetBody()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBody()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetBodyFormatOverride()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBodyFormatOverride()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetHeadersToAdd()) > 1000 { + err := ResponseMapperValidationError{ + field: "HeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResponseMapperMultiError(errors) + } + + return nil +} + +// ResponseMapperMultiError is an error wrapping multiple validation errors +// returned by ResponseMapper.ValidateAll() if the designated constraints +// aren't met. +type ResponseMapperMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseMapperMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseMapperMultiError) AllErrors() []error { return m } + +// ResponseMapperValidationError is the validation error returned by +// ResponseMapper.Validate if the designated constraints aren't met. +type ResponseMapperValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseMapperValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResponseMapperValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseMapperValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseMapperValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResponseMapperValidationError) ErrorName() string { return "ResponseMapperValidationError" } + +// Error satisfies the builtin error interface +func (e ResponseMapperValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseMapper.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseMapperValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseMapperValidationError{} + +// Validate checks the field values on Rds with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *Rds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Rds with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RdsMultiError, or nil if none found. +func (m *Rds) ValidateAll() error { + return m.validate(true) +} + +func (m *Rds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := RdsValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RouteConfigName + + if len(errors) > 0 { + return RdsMultiError(errors) + } + + return nil +} + +// RdsMultiError is an error wrapping multiple validation errors returned by +// Rds.ValidateAll() if the designated constraints aren't met. +type RdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RdsMultiError) AllErrors() []error { return m } + +// RdsValidationError is the validation error returned by Rds.Validate if the +// designated constraints aren't met. +type RdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RdsValidationError) ErrorName() string { return "RdsValidationError" } + +// Error satisfies the builtin error interface +func (e RdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RdsValidationError{} + +// Validate checks the field values on ScopedRouteConfigurationsList with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfigurationsList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfigurationsList with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ScopedRouteConfigurationsListMultiError, or nil if none found. +func (m *ScopedRouteConfigurationsList) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfigurationsList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetScopedRouteConfigurations()) < 1 { + err := ScopedRouteConfigurationsListValidationError{ + field: "ScopedRouteConfigurations", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetScopedRouteConfigurations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRouteConfigurationsListMultiError(errors) + } + + return nil +} + +// ScopedRouteConfigurationsListMultiError is an error wrapping multiple +// validation errors returned by ScopedRouteConfigurationsList.ValidateAll() +// if the designated constraints aren't met. +type ScopedRouteConfigurationsListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfigurationsListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ScopedRouteConfigurationsListMultiError) AllErrors() []error { return m } + +// ScopedRouteConfigurationsListValidationError is the validation error +// returned by ScopedRouteConfigurationsList.Validate if the designated +// constraints aren't met. +type ScopedRouteConfigurationsListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfigurationsListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfigurationsListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfigurationsListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfigurationsListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfigurationsListValidationError) ErrorName() string { + return "ScopedRouteConfigurationsListValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfigurationsListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfigurationsList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfigurationsListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfigurationsListValidationError{} + +// Validate checks the field values on ScopedRoutes with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutes with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScopedRoutesMultiError, or +// nil if none found. 
+func (m *ScopedRoutes) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRoutesValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetScopeKeyBuilder() == nil { + err := ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopeKeyBuilder()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopeKeyBuilder()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRdsConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRdsConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofConfigSpecifierPresent := false + switch v := m.ConfigSpecifier.(type) { + case *ScopedRoutes_ScopedRouteConfigurationsList: + if v == nil { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRouteConfigurationsList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRouteConfigurationsList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ScopedRoutes_ScopedRds: + if v == nil { + 
err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSpecifierPresent { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutesMultiError(errors) + } + + return nil +} + +// ScopedRoutesMultiError is an error wrapping multiple validation errors +// returned by ScopedRoutes.ValidateAll() if the designated constraints aren't met. +type ScopedRoutesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesMultiError) AllErrors() []error { return m } + +// ScopedRoutesValidationError is the validation error returned by +// ScopedRoutes.Validate if the designated constraints aren't met. +type ScopedRoutesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesValidationError) ErrorName() string { return "ScopedRoutesValidationError" } + +// Error satisfies the builtin error interface +func (e ScopedRoutesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesValidationError{} + +// Validate checks the field values on ScopedRds with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ScopedRds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRds with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScopedRdsMultiError, or nil +// if none found. +func (m *ScopedRds) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetScopedRdsConfigSource() == nil { + err := ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopedRdsConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRdsConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SrdsResourcesLocator + + if len(errors) > 0 { + return ScopedRdsMultiError(errors) + } + + return nil +} + +// ScopedRdsMultiError is an error wrapping multiple validation errors returned +// by ScopedRds.ValidateAll() if the designated constraints aren't met. +type ScopedRdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRdsMultiError) AllErrors() []error { return m } + +// ScopedRdsValidationError is the validation error returned by +// ScopedRds.Validate if the designated constraints aren't met. +type ScopedRdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRdsValidationError) ErrorName() string { return "ScopedRdsValidationError" } + +// Error satisfies the builtin error interface +func (e ScopedRdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRdsValidationError{} + +// Validate checks the field values on HttpFilter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpFilterMultiError, or +// nil if none found. +func (m *HttpFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HttpFilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for IsOptional + + // no validation rules for Disabled + + switch v := m.ConfigType.(type) { + case *HttpFilter_TypedConfig: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpFilter_ConfigDiscovery: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ 
Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpFilterMultiError(errors) + } + + return nil +} + +// HttpFilterMultiError is an error wrapping multiple validation errors +// returned by HttpFilter.ValidateAll() if the designated constraints aren't met. +type HttpFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpFilterMultiError) AllErrors() []error { return m } + +// HttpFilterValidationError is the validation error returned by +// HttpFilter.Validate if the designated constraints aren't met. +type HttpFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpFilterValidationError) ErrorName() string { return "HttpFilterValidationError" } + +// Error satisfies the builtin error interface +func (e HttpFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpFilterValidationError{} + +// Validate checks the field values on RequestIDExtension with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RequestIDExtension) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RequestIDExtension with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RequestIDExtensionMultiError, or nil if none found. 
+func (m *RequestIDExtension) ValidateAll() error { + return m.validate(true) +} + +func (m *RequestIDExtension) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RequestIDExtensionMultiError(errors) + } + + return nil +} + +// RequestIDExtensionMultiError is an error wrapping multiple validation errors +// returned by RequestIDExtension.ValidateAll() if the designated constraints +// aren't met. +type RequestIDExtensionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RequestIDExtensionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RequestIDExtensionMultiError) AllErrors() []error { return m } + +// RequestIDExtensionValidationError is the validation error returned by +// RequestIDExtension.Validate if the designated constraints aren't met. +type RequestIDExtensionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RequestIDExtensionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RequestIDExtensionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RequestIDExtensionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RequestIDExtensionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RequestIDExtensionValidationError) ErrorName() string { + return "RequestIDExtensionValidationError" +} + +// Error satisfies the builtin error interface +func (e RequestIDExtensionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRequestIDExtension.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RequestIDExtensionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RequestIDExtensionValidationError{} + +// Validate checks the field values on EnvoyMobileHttpConnectionManager with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. 
+func (m *EnvoyMobileHttpConnectionManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EnvoyMobileHttpConnectionManager with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// EnvoyMobileHttpConnectionManagerMultiError, or nil if none found. +func (m *EnvoyMobileHttpConnectionManager) ValidateAll() error { + return m.validate(true) +} + +func (m *EnvoyMobileHttpConnectionManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EnvoyMobileHttpConnectionManagerMultiError(errors) + } + + return nil +} + +// EnvoyMobileHttpConnectionManagerMultiError is an error wrapping multiple +// validation errors returned by +// EnvoyMobileHttpConnectionManager.ValidateAll() if the designated +// constraints aren't met. +type EnvoyMobileHttpConnectionManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EnvoyMobileHttpConnectionManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EnvoyMobileHttpConnectionManagerMultiError) AllErrors() []error { return m } + +// EnvoyMobileHttpConnectionManagerValidationError is the validation error +// returned by EnvoyMobileHttpConnectionManager.Validate if the designated +// constraints aren't met. +type EnvoyMobileHttpConnectionManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EnvoyMobileHttpConnectionManagerValidationError) ErrorName() string { + return "EnvoyMobileHttpConnectionManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e EnvoyMobileHttpConnectionManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEnvoyMobileHttpConnectionManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EnvoyMobileHttpConnectionManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EnvoyMobileHttpConnectionManagerValidationError{} + +// Validate checks the field values on HttpConnectionManager_Tracing with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager_Tracing with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpConnectionManager_TracingMultiError, or nil if none found. +func (m *HttpConnectionManager_Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetClientSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRandomSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRandomSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverallSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverallSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Verbose + + if all { + switch v := interface{}(m.GetMaxPathTagLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxPathTagLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSpawnUpstreamSpan()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + 
errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSpawnUpstreamSpan()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_TracingMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_TracingMultiError is an error wrapping multiple +// validation errors returned by HttpConnectionManager_Tracing.ValidateAll() +// if the designated constraints aren't met. +type HttpConnectionManager_TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_TracingMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_TracingValidationError is the validation error +// returned by HttpConnectionManager_Tracing.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_TracingValidationError) ErrorName() string { + return "HttpConnectionManager_TracingValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_Tracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_TracingValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_InternalAddressConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *HttpConnectionManager_InternalAddressConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_InternalAddressConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// HttpConnectionManager_InternalAddressConfigMultiError, or nil if none found. +func (m *HttpConnectionManager_InternalAddressConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_InternalAddressConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UnixSockets + + for idx, item := range m.GetCidrRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpConnectionManager_InternalAddressConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_InternalAddressConfigMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_InternalAddressConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_InternalAddressConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_InternalAddressConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_InternalAddressConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_InternalAddressConfigValidationError is the validation +// error returned by HttpConnectionManager_InternalAddressConfig.Validate if +// the designated constraints aren't met. +type HttpConnectionManager_InternalAddressConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpConnectionManager_InternalAddressConfigValidationError) ErrorName() string { + return "HttpConnectionManager_InternalAddressConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_InternalAddressConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_InternalAddressConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_InternalAddressConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_InternalAddressConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_SetCurrentClientCertDetails with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_SetCurrentClientCertDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_SetCurrentClientCertDetails with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// HttpConnectionManager_SetCurrentClientCertDetailsMultiError, or nil if none found. +func (m *HttpConnectionManager_SetCurrentClientCertDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSubject()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSubject()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Cert + + // no validation rules for Chain + + // no validation rules for Dns + + // no validation rules for Uri + + if len(errors) > 0 { + return HttpConnectionManager_SetCurrentClientCertDetailsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_SetCurrentClientCertDetailsMultiError is an error +// wrapping multiple validation errors returned by +// HttpConnectionManager_SetCurrentClientCertDetails.ValidateAll() if the +// designated constraints aren't met. +type HttpConnectionManager_SetCurrentClientCertDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpConnectionManager_SetCurrentClientCertDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_SetCurrentClientCertDetailsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_SetCurrentClientCertDetailsValidationError is the +// validation error returned by +// HttpConnectionManager_SetCurrentClientCertDetails.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_SetCurrentClientCertDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) ErrorName() string { + return "HttpConnectionManager_SetCurrentClientCertDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_SetCurrentClientCertDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_SetCurrentClientCertDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_SetCurrentClientCertDetailsValidationError{} + +// Validate checks the field values on HttpConnectionManager_UpgradeConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *HttpConnectionManager_UpgradeConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager_UpgradeConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpConnectionManager_UpgradeConfigMultiError, or nil if none found. 
+func (m *HttpConnectionManager_UpgradeConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_UpgradeConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UpgradeType + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_UpgradeConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_UpgradeConfigMultiError is an error wrapping multiple +// validation errors returned by +// HttpConnectionManager_UpgradeConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_UpgradeConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_UpgradeConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_UpgradeConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_UpgradeConfigValidationError is the validation error +// returned by HttpConnectionManager_UpgradeConfig.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_UpgradeConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HttpConnectionManager_UpgradeConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_UpgradeConfigValidationError) ErrorName() string { + return "HttpConnectionManager_UpgradeConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_UpgradeConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_UpgradeConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_UpgradeConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_UpgradeConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_PathNormalizationOptions with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_PathNormalizationOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_PathNormalizationOptions with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// HttpConnectionManager_PathNormalizationOptionsMultiError, or nil if none found. +func (m *HttpConnectionManager_PathNormalizationOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_PathNormalizationOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetForwardingTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetForwardingTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpFilterTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpFilterTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_PathNormalizationOptionsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_PathNormalizationOptionsMultiError is an error +// wrapping multiple validation errors returned by +// HttpConnectionManager_PathNormalizationOptions.ValidateAll() if the +// designated constraints aren't met. +type HttpConnectionManager_PathNormalizationOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_PathNormalizationOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_PathNormalizationOptionsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_PathNormalizationOptionsValidationError is the +// validation error returned by +// HttpConnectionManager_PathNormalizationOptions.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_PathNormalizationOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) ErrorName() string { + return "HttpConnectionManager_PathNormalizationOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_PathNormalizationOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_PathNormalizationOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_PathNormalizationOptionsValidationError{} + +// Validate checks the field values on HttpConnectionManager_ProxyStatusConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. 
+func (m *HttpConnectionManager_ProxyStatusConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_ProxyStatusConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// HttpConnectionManager_ProxyStatusConfigMultiError, or nil if none found. +func (m *HttpConnectionManager_ProxyStatusConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_ProxyStatusConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RemoveDetails + + // no validation rules for RemoveConnectionTerminationDetails + + // no validation rules for RemoveResponseFlags + + // no validation rules for SetRecommendedResponseCode + + switch v := m.ProxyName.(type) { + case *HttpConnectionManager_ProxyStatusConfig_UseNodeId: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for UseNodeId + case *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for LiteralProxyName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpConnectionManager_ProxyStatusConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_ProxyStatusConfigMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_ProxyStatusConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_ProxyStatusConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_ProxyStatusConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_ProxyStatusConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_ProxyStatusConfigValidationError is the validation +// error returned by HttpConnectionManager_ProxyStatusConfig.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_ProxyStatusConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpConnectionManager_ProxyStatusConfigValidationError) ErrorName() string { + return "HttpConnectionManager_ProxyStatusConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_ProxyStatusConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_ProxyStatusConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_ProxyStatusConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_HcmAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// HttpConnectionManager_HcmAccessLogOptionsMultiError, or nil if none found. +func (m *HttpConnectionManager_HcmAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + // no validation rules for FlushLogOnTunnelSuccessfullyEstablished + + if len(errors) > 0 { + return HttpConnectionManager_HcmAccessLogOptionsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_HcmAccessLogOptionsMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_HcmAccessLogOptions.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_HcmAccessLogOptionsValidationError is the validation +// error returned by HttpConnectionManager_HcmAccessLogOptions.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) ErrorName() string { + return "HttpConnectionManager_HcmAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_HcmAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + +// Validate checks the field values on ScopedRoutes_ScopeKeyBuilder with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutes_ScopeKeyBuilder with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilderMultiError, or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFragments()) < 1 { + err := ScopedRoutes_ScopeKeyBuilderValidationError{ + field: "Fragments", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFragments() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilderMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilderMultiError is an error wrapping multiple +// validation errors returned by ScopedRoutes_ScopeKeyBuilder.ValidateAll() if +// the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilderMultiError) AllErrors() []error { return m } + +// ScopedRoutes_ScopeKeyBuilderValidationError is the validation error returned +// by ScopedRoutes_ScopeKeyBuilder.Validate if the designated constraints +// aren't met. +type ScopedRoutes_ScopeKeyBuilderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRoutes_ScopeKeyBuilderValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilderValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilderValidationError{} + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError, or nil if none found. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofTypePresent := false + switch v := m.Type.(type) { + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderValueExtractor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderValueExtractor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError is an error wrapping +// multiple validation errors returned by +// 
ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError) AllErrors() []error { return m } + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError is the +// validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.Validate if the designated +// constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{} + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError, +// or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Name_Pattern.MatchString(m.GetName()) { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ElementSeparator + + switch v := m.ExtractType.(type) { + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Index + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetElement()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetElement()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError +// is an error wrapping multiple validation errors returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.ValidateAll() +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError) AllErrors() []error { + return m +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError +// is the validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.Validate +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{} + +var _ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError, +// or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetSeparator()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{ + field: "Separator", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError +// is an error wrapping multiple validation errors returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.ValidateAll() +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError) AllErrors() []error { + return m +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError +// is the validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.Validate +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. 
+func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go new file mode 100644 index 000000000..3ecbe1295 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go @@ -0,0 +1,3470 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpConnectionManager_Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SpawnUpstreamSpan != nil { + size, err := (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.Provider != nil { + if vtmsg, ok := interface{}(m.Provider).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Provider) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.MaxPathTagLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPathTagLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CidrRanges) > 0 { + for iNdEx := len(m.CidrRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CidrRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CidrRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.UnixSockets { + i-- + if m.UnixSockets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Chain { + i-- + if m.Chain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Uri { + i-- + if m.Uri { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Dns { + i-- + if m.Dns { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Cert { + i-- + if m.Cert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Subject != 
nil { + size, err := (*wrapperspb.BoolValue)(m.Subject).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HttpFilterTransformation != nil { + if vtmsg, ok := interface{}(m.HttpFilterTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpFilterTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ForwardingTransformation != nil { + if vtmsg, ok := interface{}(m.ForwardingTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ForwardingTransformation) 
+ if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_UseNodeId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SetRecommendedResponseCode { + i-- + if m.SetRecommendedResponseCode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.RemoveResponseFlags { + i-- + if m.RemoveResponseFlags { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RemoveConnectionTerminationDetails { + i-- + if m.RemoveConnectionTerminationDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RemoveDetails { + i-- + if m.RemoveDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.UseNodeId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.LiteralProxyName) + copy(dAtA[i:], m.LiteralProxyName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LiteralProxyName))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { 
+ if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + i-- + if m.FlushLogOnTunnelSuccessfullyEstablished { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http1SafeMaxConnectionDuration { + i-- + if m.Http1SafeMaxConnectionDuration { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xd0 + } + if m.AppendLocalOverload { + i-- + if m.AppendLocalOverload { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc8 + } + if m.AccessLogOptions != nil { + size, err := m.AccessLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb8 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.AddProxyProtocolConnectionState != nil { + size, err := (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for iNdEx := len(m.EarlyHeaderMutationExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.EarlyHeaderMutationExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyHeaderMutationExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + } + if m.AppendXForwardedPort { + i-- + if m.AppendXForwardedPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.TypedHeaderValidationConfig != nil { + if vtmsg, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedHeaderValidationConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.ProxyStatusConfig != nil { + size, err := m.ProxyStatusConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.SchemeHeaderTransformation != nil { + if vtmsg, ok := interface{}(m.SchemeHeaderTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SchemeHeaderTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.StripTrailingHostDot { + i-- + if m.StripTrailingHostDot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for iNdEx := len(m.OriginalIpDetectionExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.OriginalIpDetectionExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OriginalIpDetectionExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + } + if m.PathWithEscapedSlashesAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PathWithEscapedSlashesAction)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.Http3ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http3ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http3ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if m.PathNormalizationOptions != nil { + size, err := m.PathNormalizationOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if msg, ok := m.StripPortMode.(*HttpConnectionManager_StripAnyHostPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequestHeadersTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestHeadersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.StreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.StripMatchingHostPort { + i-- + if m.StripMatchingHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if m.LocalReplyConfig != nil { + size, err := m.LocalReplyConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.AlwaysSetRequestIdInResponse { + i-- + if m.AlwaysSetRequestIdInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.RequestIdExtension != nil { + size, err := m.RequestIdExtension.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.ServerHeaderTransformation != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ServerHeaderTransformation)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.MergeSlashes { + i-- + if m.MergeSlashes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.PreserveExternalRequestId { + i-- + if m.PreserveExternalRequestId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_ScopedRoutes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NormalizePath != nil { + size, err := (*wrapperspb.BoolValue)(m.NormalizePath).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.MaxRequestHeadersKb != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.DelayedCloseTimeout != nil { + size, err := (*durationpb.Duration)(m.DelayedCloseTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.InternalAddressConfig != nil { + size, err := m.InternalAddressConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.StreamIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.StreamIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if len(m.Via) > 0 { + i -= len(m.Via) + copy(dAtA[i:], m.Via) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Via))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.SkipXffAppend { + i-- + if m.SkipXffAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + i-- + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.XffNumTrustedHops != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.XffNumTrustedHops)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.Proxy_100Continue { + i-- + if m.Proxy_100Continue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.SetCurrentClientCertDetails != nil { + size, err := m.SetCurrentClientCertDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ForwardClientCertDetails != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ForwardClientCertDetails)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.GenerateRequestId != nil { + size, err := (*wrapperspb.BoolValue)(m.GenerateRequestId).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UseRemoteAddress != nil { + size, err := (*wrapperspb.BoolValue)(m.UseRemoteAddress).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, 
error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.DrainTimeout != nil { + size, err := (*durationpb.Duration)(m.DrainTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.ServerName) > 0 { + i -= len(m.ServerName) + copy(dAtA[i:], m.ServerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerName))) + i-- + dAtA[i] = 0x52 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AddUserAgent != nil { + size, err := (*wrapperspb.BoolValue)(m.AddUserAgent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HttpFilters) > 0 { + for iNdEx := len(m.HttpFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HttpFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_Rds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x12 + } + if m.CodecType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Rds) MarshalToVTStrict(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Rds != nil { + size, err := m.Rds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRoutes != nil { + size, err := m.ScopedRoutes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StripAnyHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd0 + return len(dAtA) - i, nil +} +func (m *LocalReplyConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalReplyConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalReplyConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BodyFormat != nil { + if vtmsg, ok := interface{}(m.BodyFormat).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormat) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Mappers) > 0 { + for iNdEx := len(m.Mappers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Mappers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseMapper) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseMapper) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseMapper) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeadersToAdd) > 0 { + for iNdEx := len(m.HeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.BodyFormatOverride != nil { + if vtmsg, ok := interface{}(m.BodyFormatOverride).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormatOverride) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.StatusCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.StatusCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Filter != nil { + if vtmsg, ok := interface{}(m.Filter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Filter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Rds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RouteConfigName) > 0 { + i -= len(m.RouteConfigName) + copy(dAtA[i:], m.RouteConfigName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigName))) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfigurationsList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScopedRouteConfigurations) > 0 { + for iNdEx := len(m.ScopedRouteConfigurations) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ScopedRouteConfigurations[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfigurations[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Separator) > 0 { + i -= len(m.Separator) + copy(dAtA[i:], m.Separator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Separator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ElementSeparator) > 0 { + i -= len(m.ElementSeparator) + copy(dAtA[i:], m.ElementSeparator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ElementSeparator))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Element != nil { + size, err := m.Element.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueExtractor != nil { + size, err := m.HeaderValueExtractor.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRouteConfigurationsList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RdsConfigSource != nil { + if vtmsg, ok := interface{}(m.RdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ScopeKeyBuilder != nil { + size, err := m.ScopeKeyBuilder.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfigurationsList != nil { + size, err := m.ScopedRouteConfigurationsList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRds != nil { + size, err := m.ScopedRds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ScopedRds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SrdsResourcesLocator) > 0 { + i -= len(m.SrdsResourcesLocator) + copy(dAtA[i:], 
m.SrdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SrdsResourcesLocator))) + i-- + dAtA[i] = 0x12 + } + if m.ScopedRdsConfigSource != nil { + if vtmsg, ok := interface{}(m.ScopedRdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigType.(*HttpFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*HttpFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RequestIDExtension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestIDExtension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RequestIDExtension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := m.Config.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Verbose { + n += 2 + } + if m.MaxPathTagLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPathTagLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Provider != nil { + if size, ok := interface{}(m.Provider).(interface { + SizeVT() int + }); ok { + l 
= size.SizeVT() + } else { + l = proto.Size(m.Provider) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SpawnUpstreamSpan != nil { + l = (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_InternalAddressConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UnixSockets { + n += 2 + } + if len(m.CidrRanges) > 0 { + for _, e := range m.CidrRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Subject != nil { + l = (*wrapperspb.BoolValue)(m.Subject).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cert { + n += 2 + } + if m.Dns { + n += 2 + } + if m.Uri { + n += 2 + } + if m.Chain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_PathNormalizationOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ForwardingTransformation != nil { + if size, ok := interface{}(m.ForwardingTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ForwardingTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpFilterTransformation != nil { + if size, ok := interface{}(m.HttpFilterTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpFilterTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoveDetails { + n += 2 + } + if m.RemoveConnectionTerminationDetails { + n += 2 + } + if m.RemoveResponseFlags { + n += 2 + } + if m.SetRecommendedResponseCode { + n += 2 + } + if vtmsg, ok := m.ProxyName.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LiteralProxyName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpConnectionManager_HcmAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.FlushAccessLogOnNewRequest { + n += 2 + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CodecType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecType)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RouteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.HttpFilters) > 0 { + for _, e := range m.HttpFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddUserAgent != nil { + l = (*wrapperspb.BoolValue)(m.AddUserAgent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServerName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTimeout != nil { + l = (*durationpb.Duration)(m.DrainTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseRemoteAddress != nil { + l = (*wrapperspb.BoolValue)(m.UseRemoteAddress).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GenerateRequestId != nil { + l = (*wrapperspb.BoolValue)(m.GenerateRequestId).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardClientCertDetails != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ForwardClientCertDetails)) + } + if m.SetCurrentClientCertDetails != nil { + l = m.SetCurrentClientCertDetails.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Proxy_100Continue { + n += 3 + } + if m.XffNumTrustedHops != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.XffNumTrustedHops)) + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + n += 3 + } + if m.SkipXffAppend { + n += 3 + } + l = len(m.Via) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StreamIdleTimeout != nil { + l = (*durationpb.Duration)(m.StreamIdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalAddressConfig != nil { + l = m.InternalAddressConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DelayedCloseTimeout != nil { + l = (*durationpb.Duration)(m.DelayedCloseTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.MaxRequestHeadersKb != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NormalizePath != nil { + l = (*wrapperspb.BoolValue)(m.NormalizePath).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreserveExternalRequestId { + n += 3 + } + if m.MergeSlashes { + n += 3 + } + if m.ServerHeaderTransformation != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ServerHeaderTransformation)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestIdExtension != nil { + l = m.RequestIdExtension.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AlwaysSetRequestIdInResponse { + n += 3 + } + if m.LocalReplyConfig != nil { + l = m.LocalReplyConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StripMatchingHostPort { + n += 3 + } + if m.StreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersTimeout != nil { + l = (*durationpb.Duration)(m.RequestHeadersTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.StripPortMode.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.PathNormalizationOptions != nil { + l = m.PathNormalizationOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http3ProtocolOptions != nil { + if size, ok := interface{}(m.Http3ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http3ProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathWithEscapedSlashesAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.PathWithEscapedSlashesAction)) + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for _, e := range m.OriginalIpDetectionExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StripTrailingHostDot { + n += 3 + } + if m.SchemeHeaderTransformation != nil { + if size, ok := interface{}(m.SchemeHeaderTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SchemeHeaderTransformation) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProxyStatusConfig != nil { + l = m.ProxyStatusConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedHeaderValidationConfig != nil { + if size, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedHeaderValidationConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedPort { + n += 3 + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for _, e := range m.EarlyHeaderMutationExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddProxyProtocolConnectionState != nil { + l = (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).SizeVT() + n += 2 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FlushAccessLogOnNewRequest { + n += 3 + } + if m.AccessLogOptions != nil { + l = m.AccessLogOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendLocalOverload { + n += 3 + } + if m.Http1SafeMaxConnectionDuration { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rds != nil { + l = m.Rds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRoutes != nil { + l = m.ScopedRoutes.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *HttpConnectionManager_StripAnyHostPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *LocalReplyConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Mappers) > 0 { + for _, e := range m.Mappers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BodyFormat != nil { + if size, ok := interface{}(m.BodyFormat).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormat) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseMapper) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + if size, ok := interface{}(m.Filter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Filter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatusCode != nil { + l = (*wrapperspb.UInt32Value)(m.StatusCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BodyFormatOverride != nil { + if size, ok := interface{}(m.BodyFormatOverride).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormatOverride) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HeadersToAdd) > 0 { + for _, e := range m.HeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + if size, ok := interface{}(m.ConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigSource) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ScopedRouteConfigurations) > 0 { + for _, e := range m.ScopedRouteConfigurations { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Separator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ElementSeparator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ExtractType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Index)) + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Element != nil { + l = m.Element.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueExtractor != nil { + l = m.HeaderValueExtractor.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ScopeKeyBuilder != nil { + l = m.ScopeKeyBuilder.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RdsConfigSource != nil { + if size, ok := interface{}(m.RdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m 
*ScopedRoutes_ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfigurationsList != nil { + l = m.ScopedRouteConfigurationsList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRds != nil { + l = m.ScopedRds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRdsConfigSource != nil { + if size, ok := interface{}(m.ScopedRdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SrdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IsOptional { + n += 2 + } + if m.Disabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpFilter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RequestIDExtension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyMobileHttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = m.Config.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go new file mode 100644 index 000000000..7d69f6349 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go @@ -0,0 +1,303 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the client_side_weighted_round_robin LB policy. +// +// This policy differs from the built-in ROUND_ROBIN policy in terms of +// how the endpoint weights are determined. In the ROUND_ROBIN policy, +// the endpoint weights are sent by the control plane via EDS. However, +// in this policy, the endpoint weights are instead determined via qps (queries +// per second), eps (errors per second), and utilization metrics sent by the +// endpoint using the Open Request Cost Aggregation (ORCA) protocol. Utilization +// is determined by using the ORCA application_utilization field, if set, or +// else falling back to the cpu_utilization field. All queries count toward qps, +// regardless of result. Only failed queries count toward eps. A config +// parameter error_utilization_penalty controls the penalty to adjust endpoint +// weights using eps and qps. The weight of a given endpoint is computed as: +// +// qps / (utilization + eps/qps * error_utilization_penalty) +// +// See the :ref:`load balancing architecture overview` for more information. +// +// [#next-free-field: 7] +type ClientSideWeightedRoundRobin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether to enable out-of-band utilization reporting collection from + // the endpoints. By default, per-request utilization reporting is used. + EnableOobLoadReport *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enable_oob_load_report,json=enableOobLoadReport,proto3" json:"enable_oob_load_report,omitempty"` + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OobReportingPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=oob_reporting_period,json=oobReportingPeriod,proto3" json:"oob_reporting_period,omitempty"` + // A given endpoint must report load metrics continuously for at least + // this long before the endpoint weight will be used. This avoids + // churn when the set of endpoint addresses changes. Takes effect + // both immediately after we establish a connection to an endpoint and + // after weight_expiration_period has caused us to stop using the most + // recent load metrics. Default is 10 seconds. + BlackoutPeriod *durationpb.Duration `protobuf:"bytes,3,opt,name=blackout_period,json=blackoutPeriod,proto3" json:"blackout_period,omitempty"` + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. 
This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=weight_expiration_period,json=weightExpirationPeriod,proto3" json:"weight_expiration_period,omitempty"` + // How often endpoint weights are recalculated. Values less than 100ms are + // capped at 100ms. Default is 1 second. + WeightUpdatePeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=weight_update_period,json=weightUpdatePeriod,proto3" json:"weight_update_period,omitempty"` + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Configuration is rejected if this value is negative. + // Default is 1.0. + ErrorUtilizationPenalty *wrapperspb.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` +} + +func (x *ClientSideWeightedRoundRobin) Reset() { + *x = ClientSideWeightedRoundRobin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientSideWeightedRoundRobin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientSideWeightedRoundRobin) ProtoMessage() {} + +func (x *ClientSideWeightedRoundRobin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientSideWeightedRoundRobin.ProtoReflect.Descriptor instead. 
+func (*ClientSideWeightedRoundRobin) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientSideWeightedRoundRobin) GetEnableOobLoadReport() *wrapperspb.BoolValue { + if x != nil { + return x.EnableOobLoadReport + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetOobReportingPeriod() *durationpb.Duration { + if x != nil { + return x.OobReportingPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetBlackoutPeriod() *durationpb.Duration { + if x != nil { + return x.BlackoutPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightExpirationPeriod() *durationpb.Duration { + if x != nil { + return x.WeightExpirationPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightUpdatePeriod() *durationpb.Duration { + if x != nil { + return x.WeightUpdatePeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrapperspb.FloatValue { + if x != nil { + return x.ErrorUtilizationPenalty + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = []byte{ + 0x0a, 0x73, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x04, 0x0a, 0x1c, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x52, 
0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x4f, 0x0a, 0x16, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x4f, 0x6f, 0x62, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x4b, 0x0a, + 0x14, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6f, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x42, 0x0a, 0x0f, 0x62, 0x6c, + 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, + 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x53, + 0x0a, 0x18, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x4b, 0x0a, 0x14, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x63, 0x0a, 0x19, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x0a, 0x05, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x17, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x42, 0xa2, 0x02, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x0a, 0x5a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 
0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x21, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x96, 0x01, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, + 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = []interface{}{ + (*ClientSideWeightedRoundRobin)(nil), // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin + (*wrapperspb.BoolValue)(nil), // 1: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.FloatValue)(nil), // 3: google.protobuf.FloatValue +} +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.enable_oob_load_report:type_name -> google.protobuf.BoolValue + 2, // 1: 
envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.oob_reporting_period:type_name -> google.protobuf.Duration + 2, // 2: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.blackout_period:type_name -> google.protobuf.Duration + 2, // 3: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_expiration_period:type_name -> google.protobuf.Duration + 2, // 4: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_update_period:type_name -> google.protobuf.Duration + 3, // 5: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.error_utilization_penalty:type_name -> google.protobuf.FloatValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() +} +func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() { + if File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSideWeightedRoundRobin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto = out.File + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go new file mode 100644 index 000000000..0d6a6ca7e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go @@ -0,0 +1,300 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientSideWeightedRoundRobin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientSideWeightedRoundRobinMultiError, or nil if none found. 
+func (m *ClientSideWeightedRoundRobin) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientSideWeightedRoundRobin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEnableOobLoadReport()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableOobLoadReport()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOobReportingPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOobReportingPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetBlackoutPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBlackoutPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightExpirationPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightExpirationPeriod()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightUpdatePeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightUpdatePeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetErrorUtilizationPenalty(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := ClientSideWeightedRoundRobinValidationError{ + field: "ErrorUtilizationPenalty", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ClientSideWeightedRoundRobinMultiError(errors) + } + + return nil +} + +// ClientSideWeightedRoundRobinMultiError is an error wrapping multiple +// validation errors returned by ClientSideWeightedRoundRobin.ValidateAll() if +// the designated constraints aren't met. +type ClientSideWeightedRoundRobinMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientSideWeightedRoundRobinMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientSideWeightedRoundRobinMultiError) AllErrors() []error { return m } + +// ClientSideWeightedRoundRobinValidationError is the validation error returned +// by ClientSideWeightedRoundRobin.Validate if the designated constraints +// aren't met. +type ClientSideWeightedRoundRobinValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientSideWeightedRoundRobinValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientSideWeightedRoundRobinValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientSideWeightedRoundRobinValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientSideWeightedRoundRobinValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClientSideWeightedRoundRobinValidationError) ErrorName() string { + return "ClientSideWeightedRoundRobinValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientSideWeightedRoundRobinValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientSideWeightedRoundRobin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientSideWeightedRoundRobinValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientSideWeightedRoundRobinValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go new file mode 100644 index 000000000..250bbcfd3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go @@ -0,0 +1,148 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientSideWeightedRoundRobin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientSideWeightedRoundRobin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ErrorUtilizationPenalty != nil { + size, err := (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.WeightUpdatePeriod != nil { + size, err := (*durationpb.Duration)(m.WeightUpdatePeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.WeightExpirationPeriod != nil { + size, err := (*durationpb.Duration)(m.WeightExpirationPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BlackoutPeriod != nil { + size, err := (*durationpb.Duration)(m.BlackoutPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.OobReportingPeriod != nil { + size, err := (*durationpb.Duration)(m.OobReportingPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EnableOobLoadReport != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableOobLoadReport).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientSideWeightedRoundRobin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableOobLoadReport != nil { + l = (*wrapperspb.BoolValue)(m.EnableOobLoadReport).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OobReportingPeriod != nil { + l = (*durationpb.Duration)(m.OobReportingPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BlackoutPeriod != nil { + l = (*durationpb.Duration)(m.BlackoutPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightExpirationPeriod != nil { + l = (*durationpb.Duration)(m.WeightExpirationPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightUpdatePeriod != nil { + l = (*durationpb.Duration)(m.WeightUpdatePeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorUtilizationPenalty != nil { + l = (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go new file mode 100644 index 000000000..726732605 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go @@ -0,0 +1,617 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LocalityLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LocalityConfigSpecifier: + // + // *LocalityLbConfig_ZoneAwareLbConfig_ + // *LocalityLbConfig_LocalityWeightedLbConfig_ + LocalityConfigSpecifier isLocalityLbConfig_LocalityConfigSpecifier `protobuf_oneof:"locality_config_specifier"` +} + +func (x *LocalityLbConfig) Reset() { + *x = LocalityLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig.ProtoReflect.Descriptor instead. 
+func (*LocalityLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (m *LocalityLbConfig) GetLocalityConfigSpecifier() isLocalityLbConfig_LocalityConfigSpecifier { + if m != nil { + return m.LocalityConfigSpecifier + } + return nil +} + +func (x *LocalityLbConfig) GetZoneAwareLbConfig() *LocalityLbConfig_ZoneAwareLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + return x.ZoneAwareLbConfig + } + return nil +} + +func (x *LocalityLbConfig) GetLocalityWeightedLbConfig() *LocalityLbConfig_LocalityWeightedLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + return x.LocalityWeightedLbConfig + } + return nil +} + +type isLocalityLbConfig_LocalityConfigSpecifier interface { + isLocalityLbConfig_LocalityConfigSpecifier() +} + +type LocalityLbConfig_ZoneAwareLbConfig_ struct { + // Configuration for local zone aware load balancing. + ZoneAwareLbConfig *LocalityLbConfig_ZoneAwareLbConfig `protobuf:"bytes,1,opt,name=zone_aware_lb_config,json=zoneAwareLbConfig,proto3,oneof"` +} + +type LocalityLbConfig_LocalityWeightedLbConfig_ struct { + // Enable locality weighted load balancing. + LocalityWeightedLbConfig *LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,2,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3,oneof"` +} + +func (*LocalityLbConfig_ZoneAwareLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +func (*LocalityLbConfig_LocalityWeightedLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +// Configuration for :ref:`slow start mode `. +type SlowStartConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // “new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))“, + // where “time_factor=(time_since_start_seconds / slow_start_time_seconds)“. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + Aggression *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=aggression,proto3" json:"aggression,omitempty"` + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. 
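The aggression comment above quotes the slow start scaling relationship. As a rough illustration only (the helper below is hypothetical and not part of the generated package), the documented formula can be written out directly:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// effectiveWeight mirrors the documented relationship
//   new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))
// with time_factor = time_since_start_seconds / slow_start_time_seconds.
func effectiveWeight(weight float64, sinceStart, window time.Duration, aggression, minWeightPct float64) float64 {
	if sinceStart >= window {
		return weight // the host has exited slow start, so no scaling applies
	}
	timeFactor := sinceStart.Seconds() / window.Seconds()
	scale := math.Max(minWeightPct/100, math.Pow(timeFactor, 1/aggression))
	return weight * scale
}

func main() {
	// Halfway through a 60s window, aggression 1.0, 10% floor: weight 100 -> 50.
	fmt.Println(effectiveWeight(100, 30*time.Second, 60*time.Second, 1.0, 10))
}
```

With aggression 1.0 the ramp-up is linear; raising aggression above 1.0 makes time_factor^(1/aggression) larger early in the window, so proportionally more traffic reaches the host sooner, matching the comment's description of a non-linear ramp-up.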
+ MinWeightPercent *v31.Percent `protobuf:"bytes,3,opt,name=min_weight_percent,json=minWeightPercent,proto3" json:"min_weight_percent,omitempty"` +} + +func (x *SlowStartConfig) Reset() { + *x = SlowStartConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SlowStartConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SlowStartConfig) ProtoMessage() {} + +func (x *SlowStartConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SlowStartConfig.ProtoReflect.Descriptor instead. +func (*SlowStartConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { + if x != nil { + return x.SlowStartWindow + } + return nil +} + +func (x *SlowStartConfig) GetAggression() *v3.RuntimeDouble { + if x != nil { + return x.Aggression + } + return nil +} + +func (x *SlowStartConfig) GetMinWeightPercent() *v31.Percent { + if x != nil { + return x.MinWeightPercent + } + return nil +} + +// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) +type ConsistentHashingLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. 
Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` +} + +func (x *ConsistentHashingLbConfig) Reset() { + *x = ConsistentHashingLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsistentHashingLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsistentHashingLbConfig) ProtoMessage() {} + +func (x *ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsistentHashingLbConfig.ProtoReflect.Descriptor instead. +func (*ConsistentHashingLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +func (x *ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +// Configuration for :ref:`zone aware routing +// `. +type LocalityLbConfig_ZoneAwareLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + RoutingEnabled *v31.Percent `protobuf:"bytes,1,opt,name=routing_enabled,json=routingEnabled,proto3" json:"routing_enabled,omitempty"` + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. 
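The hash_balance_factor comment earlier in this file describes the bounded-load variant of consistent hashing. A back-of-the-envelope sketch of the cap it imposes (a hypothetical helper, shown only to make the arithmetic concrete):

```go
package main

import "fmt"

// requestCap works through the bound described for hash_balance_factor:
// with a factor F (minimum 100), any single upstream host is capped at
// F/100 times the average request load across the cluster.
func requestCap(hashBalanceFactor uint32, totalRequests, hostCount int) float64 {
	average := float64(totalRequests) / float64(hostCount)
	return float64(hashBalanceFactor) / 100 * average
}

func main() {
	// Factor 150, 900 requests spread over 9 hosts (average 100 per host):
	// no single host should be asked to serve more than 150 requests.
	fmt.Println(requestCap(150, 900, 9))
}
```

At the minimum factor of 100, every host is capped at exactly the cluster average; larger factors loosen the bound and reduce how often the linear probe has to look for another eligible host.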
+ FailTrafficOnPanic bool `protobuf:"varint,3,opt,name=fail_traffic_on_panic,json=failTrafficOnPanic,proto3" json:"fail_traffic_on_panic,omitempty"` +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) Reset() { + *x = LocalityLbConfig_ZoneAwareLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_ZoneAwareLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_ZoneAwareLbConfig.ProtoReflect.Descriptor instead. +func (*LocalityLbConfig_ZoneAwareLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v31.Percent { + if x != nil { + return x.RoutingEnabled + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinClusterSize + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetFailTrafficOnPanic() bool { + if x != nil { + return x.FailTrafficOnPanic + } + return false +} + +// Configuration for :ref:`locality weighted load balancing +// ` +type LocalityLbConfig_LocalityWeightedLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) Reset() { + *x = LocalityLbConfig_LocalityWeightedLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_LocalityWeightedLbConfig.ProtoReflect.Descriptor instead. 
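The ZoneAwareLbConfig fields above are normally populated through the generated oneof wrapper. A minimal sketch of building such a config with the types from this file (the package aliases are assumptions for the example):

```go
package main

import (
	"fmt"

	commonv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3"
	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Populate the locality_config_specifier oneof with zone aware settings.
	cfg := &commonv3.LocalityLbConfig{
		LocalityConfigSpecifier: &commonv3.LocalityLbConfig_ZoneAwareLbConfig_{
			ZoneAwareLbConfig: &commonv3.LocalityLbConfig_ZoneAwareLbConfig{
				RoutingEnabled:     &typev3.Percent{Value: 100},
				MinClusterSize:     wrapperspb.UInt64(6),
				FailTrafficOnPanic: true,
			},
		},
	}

	// The generated oneof getter returns nil unless this variant is set,
	// so the chained Get calls below are safe even on an empty config.
	fmt.Println(cfg.GetZoneAwareLbConfig().GetMinClusterSize().GetValue())
}
```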
+func (*LocalityLbConfig_LocalityWeightedLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 1} +} + +var File_envoy_extensions_load_balancing_policies_common_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x04, 0x0a, 0x10, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x89, 0x01, 0x0a, 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, + 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x56, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, + 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9e, 0x01, 0x0a, + 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xcf, 0x01, + 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, + 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, + 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, + 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, + 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x1a, + 0x1a, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x20, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe3, 0x01, + 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x52, 0x11, + 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x42, 0xbd, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x40, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x62, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = []interface{}{ + (*LocalityLbConfig)(nil), // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 
(*SlowStartConfig)(nil), // 1: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*ConsistentHashingLbConfig)(nil), // 2: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*LocalityLbConfig_ZoneAwareLbConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + (*LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration + (*v3.RuntimeDouble)(nil), // 6: envoy.config.core.v3.RuntimeDouble + (*v31.Percent)(nil), // 7: envoy.type.v3.Percent + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.UInt64Value)(nil), // 9: google.protobuf.UInt64Value +} +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = []int32{ + 3, // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.zone_aware_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + 4, // 1: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.locality_weighted_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 5, // 2: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 6, // 3: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 7, // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 8, // 5: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 9, // 7: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() } +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() { + if File_envoy_extensions_load_balancing_policies_common_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SlowStartConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConsistentHashingLbConfig); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_ZoneAwareLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_LocalityWeightedLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*LocalityLbConfig_ZoneAwareLbConfig_)(nil), + (*LocalityLbConfig_LocalityWeightedLbConfig_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_common_v3_common_proto = out.File + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go new file mode 100644 index 000000000..2aa2f26da --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go @@ -0,0 +1,814 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LocalityLbConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalityLbConfigMultiError, or nil if none found. +func (m *LocalityLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofLocalityConfigSpecifierPresent := false + switch v := m.LocalityConfigSpecifier.(type) { + case *LocalityLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetZoneAwareLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LocalityLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLocalityConfigSpecifierPresent { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return LocalityLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfigMultiError is an error wrapping multiple validation errors +// returned by LocalityLbConfig.ValidateAll() if the designated constraints +// aren't met. +type LocalityLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LocalityLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfigValidationError is the validation error returned by +// LocalityLbConfig.Validate if the designated constraints aren't met. +type LocalityLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfigValidationError) ErrorName() string { return "LocalityLbConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LocalityLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfigValidationError{} + +// Validate checks the field values on SlowStartConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SlowStartConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SlowStartConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SlowStartConfigMultiError, or nil if none found. 
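The Validate/ValidateAll pair above follows the usual protoc-gen-validate pattern: Validate stops at the first violation, while ValidateAll collects every violation into a MultiError. A small usage sketch against the generated LocalityLbConfig type (import alias assumed for the example):

```go
package main

import (
	"errors"
	"fmt"

	commonv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3"
)

func main() {
	// An empty LocalityLbConfig violates the rule that the
	// locality_config_specifier oneof must be populated.
	cfg := &commonv3.LocalityLbConfig{}

	// ValidateAll wraps everything it finds in LocalityLbConfigMultiError.
	if err := cfg.ValidateAll(); err != nil {
		var multi commonv3.LocalityLbConfigMultiError
		if errors.As(err, &multi) {
			for _, violation := range multi.AllErrors() {
				fmt.Println(violation)
			}
		}
	}
}
```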
+func (m *SlowStartConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SlowStartConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAggression()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAggression()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinWeightPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinWeightPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SlowStartConfigMultiError(errors) + } + + return nil +} + +// SlowStartConfigMultiError is an error wrapping multiple validation errors +// returned by SlowStartConfig.ValidateAll() if the designated constraints +// aren't met. +type SlowStartConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SlowStartConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SlowStartConfigMultiError) AllErrors() []error { return m } + +// SlowStartConfigValidationError is the validation error returned by +// SlowStartConfig.Validate if the designated constraints aren't met. 
+type SlowStartConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SlowStartConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SlowStartConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SlowStartConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SlowStartConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SlowStartConfigValidationError) ErrorName() string { return "SlowStartConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SlowStartConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSlowStartConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SlowStartConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SlowStartConfigValidationError{} + +// Validate checks the field values on ConsistentHashingLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConsistentHashingLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConsistentHashingLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConsistentHashingLbConfigMultiError, or nil if none found. +func (m *ConsistentHashingLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ConsistentHashingLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := ConsistentHashingLbConfigValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ConsistentHashingLbConfigMultiError(errors) + } + + return nil +} + +// ConsistentHashingLbConfigMultiError is an error wrapping multiple validation +// errors returned by ConsistentHashingLbConfig.ValidateAll() if the +// designated constraints aren't met. +type ConsistentHashingLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConsistentHashingLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConsistentHashingLbConfigMultiError) AllErrors() []error { return m } + +// ConsistentHashingLbConfigValidationError is the validation error returned by +// ConsistentHashingLbConfig.Validate if the designated constraints aren't met. +type ConsistentHashingLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ConsistentHashingLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConsistentHashingLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConsistentHashingLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConsistentHashingLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConsistentHashingLbConfigValidationError) ErrorName() string { + return "ConsistentHashingLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ConsistentHashingLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConsistentHashingLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConsistentHashingLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConsistentHashingLbConfigValidationError{} + +// Validate checks the field values on LocalityLbConfig_ZoneAwareLbConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *LocalityLbConfig_ZoneAwareLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig_ZoneAwareLbConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// LocalityLbConfig_ZoneAwareLbConfigMultiError, or nil if none found. 
+func (m *LocalityLbConfig_ZoneAwareLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRoutingEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoutingEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinClusterSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinClusterSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FailTrafficOnPanic + + if len(errors) > 0 { + return LocalityLbConfig_ZoneAwareLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_ZoneAwareLbConfigMultiError is an error wrapping multiple +// validation errors returned by +// LocalityLbConfig_ZoneAwareLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_ZoneAwareLbConfigValidationError is the validation error +// returned by LocalityLbConfig_ZoneAwareLbConfig.Validate if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_ZoneAwareLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_ZoneAwareLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +// Validate checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig_LocalityWeightedLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// LocalityLbConfig_LocalityWeightedLbConfigMultiError, or nil if none found. +func (m *LocalityLbConfig_LocalityWeightedLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return LocalityLbConfig_LocalityWeightedLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_LocalityWeightedLbConfigMultiError is an error wrapping +// multiple validation errors returned by +// LocalityLbConfig_LocalityWeightedLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_LocalityWeightedLbConfigValidationError is the validation +// error returned by LocalityLbConfig_LocalityWeightedLbConfig.Validate if the +// designated constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_LocalityWeightedLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_LocalityWeightedLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go new file mode 100644 index 000000000..ad3021a24 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go @@ -0,0 +1,492 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LocalityConfigSpecifier.(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.LocalityConfigSpecifier.(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlowStartConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go new file mode 100644 index 000000000..819df3d7b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go @@ -0,0 +1,383 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available methods for selecting the host set from which to return the host with the +// fewest active requests. +type LeastRequest_SelectionMethod int32 + +const ( + // Return host with fewest requests from a set of “choice_count“ randomly selected hosts. + // Best selection method for most scenarios. + LeastRequest_N_CHOICES LeastRequest_SelectionMethod = 0 + // Return host with fewest requests from all hosts. + // Useful in some niche use cases involving low request rates and one of: + // (example 1) low request limits on workloads, or (example 2) few hosts. + // + // Example 1: Consider a workload type that can only accept one connection at a time. + // If such workloads are deployed across many hosts, only a small percentage of those + // workloads have zero connections at any given time, and the rate of new connections is low, + // the “FULL_SCAN“ method is more likely to select a suitable host than “N_CHOICES“. + // + // Example 2: Consider a workload type that is only deployed on 2 hosts. With default settings, + // the “N_CHOICES“ method will return the host with more active requests 25% of the time. + // If the request rate is sufficiently low, the behavior of always selecting the host with least + // requests as of the last metrics refresh may be preferable. + LeastRequest_FULL_SCAN LeastRequest_SelectionMethod = 1 +) + +// Enum value maps for LeastRequest_SelectionMethod. 
+var ( + LeastRequest_SelectionMethod_name = map[int32]string{ + 0: "N_CHOICES", + 1: "FULL_SCAN", + } + LeastRequest_SelectionMethod_value = map[string]int32{ + "N_CHOICES": 0, + "FULL_SCAN": 1, + } +) + +func (x LeastRequest_SelectionMethod) Enum() *LeastRequest_SelectionMethod { + p := new(LeastRequest_SelectionMethod) + *p = x + return p +} + +func (x LeastRequest_SelectionMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LeastRequest_SelectionMethod) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0].Descriptor() +} + +func (LeastRequest_SelectionMethod) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0] +} + +func (x LeastRequest_SelectionMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LeastRequest_SelectionMethod.Descriptor instead. +func (LeastRequest_SelectionMethod) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0, 0} +} + +// This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +// [#next-free-field: 7] +type LeastRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + // Only applies to the “N_CHOICES“ selection method. + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // “weight = load_balancing_weight / (active_requests + 1)^active_request_bias“ + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // “active_request_bias“ must be greater than or equal to 0.0. + // + // When “active_request_bias == 0.0“ the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When “active_request_bias > 0.0“ the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // + // This setting only takes effect if all host weights are not equal. + ActiveRequestBias *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=active_request_bias,json=activeRequestBias,proto3" json:"active_request_bias,omitempty"` + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. 
+ SlowStartConfig *v31.SlowStartConfig `protobuf:"bytes,3,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` + // Configuration for local zone aware load balancing or locality weighted load balancing. + LocalityLbConfig *v31.LocalityLbConfig `protobuf:"bytes,4,opt,name=locality_lb_config,json=localityLbConfig,proto3" json:"locality_lb_config,omitempty"` + // [#not-implemented-hide:] + // Unused. Replaced by the `selection_method` enum for better extensibility. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. + EnableFullScan *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enable_full_scan,json=enableFullScan,proto3" json:"enable_full_scan,omitempty"` + // Method for selecting the host set from which to return the host with the fewest active requests. + // + // Defaults to “N_CHOICES“. + SelectionMethod LeastRequest_SelectionMethod `protobuf:"varint,6,opt,name=selection_method,json=selectionMethod,proto3,enum=envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest_SelectionMethod" json:"selection_method,omitempty"` +} + +func (x *LeastRequest) Reset() { + *x = LeastRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeastRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeastRequest) ProtoMessage() {} + +func (x *LeastRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeastRequest.ProtoReflect.Descriptor instead. +func (*LeastRequest) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0} +} + +func (x *LeastRequest) GetChoiceCount() *wrapperspb.UInt32Value { + if x != nil { + return x.ChoiceCount + } + return nil +} + +func (x *LeastRequest) GetActiveRequestBias() *v3.RuntimeDouble { + if x != nil { + return x.ActiveRequestBias + } + return nil +} + +func (x *LeastRequest) GetSlowStartConfig() *v31.SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +func (x *LeastRequest) GetLocalityLbConfig() *v31.LocalityLbConfig { + if x != nil { + return x.LocalityLbConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. 
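As a concrete illustration of the field comments above (not part of the vendored change), a consumer could build and validate a LeastRequest policy as sketched below; the runtime key string is a made-up placeholder.

```go
package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	least_requestv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	cfg := &least_requestv3.LeastRequest{
		// Power-of-two-choices: sample two random healthy hosts, keep the one with fewer active requests.
		ChoiceCount:     wrapperspb.UInt32(2),
		SelectionMethod: least_requestv3.LeastRequest_N_CHOICES,
		// With bias 1.0 the effective weight is load_balancing_weight / (active_requests + 1),
		// e.g. a host with weight 100 and 4 in-flight requests scores 100 / 5 = 20.
		ActiveRequestBias: &corev3.RuntimeDouble{
			DefaultValue: 1.0,
			RuntimeKey:   "upstream.lb.active_request_bias", // hypothetical runtime key
		},
	}

	// Validate comes from the generated least_request.pb.validate.go in this change
	// and enforces, among other things, choice_count >= 2.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println("selection method:", cfg.GetSelectionMethod())
}
```

Setting SelectionMethod to LeastRequest_FULL_SCAN instead trades the constant-cost two-choice sample for a scan over all hosts, which the comments above recommend only for small or low-traffic host sets.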
+func (x *LeastRequest) GetEnableFullScan() *wrapperspb.BoolValue { + if x != nil { + return x.EnableFullScan + } + return nil +} + +func (x *LeastRequest) GetSelectionMethod() LeastRequest_SelectionMethod { + if x != nil { + return x.SelectionMethod + } + return LeastRequest_N_CHOICES +} + +var File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = []byte{ + 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x05, 0x0a, 0x0c, 0x4c, 0x65, + 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x6f, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x72, 0x0a, 0x12, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, + 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, + 0x61, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, + 0x6e, 0x12, 0x8c, 0x01, 0x0a, 0x10, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x57, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x22, 0x2f, 0x0a, 0x0f, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x5f, 0x43, 0x48, 0x4f, 0x49, 0x43, 0x45, 0x53, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x55, 0x4c, 0x4c, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, + 0x01, 0x42, 0xd8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x47, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = []interface{}{ + (LeastRequest_SelectionMethod)(0), // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + (*LeastRequest)(nil), // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest + (*wrapperspb.UInt32Value)(nil), // 2: google.protobuf.UInt32Value + (*v3.RuntimeDouble)(nil), // 3: envoy.config.core.v3.RuntimeDouble + (*v31.SlowStartConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*v31.LocalityLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue +} +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.choice_count:type_name -> google.protobuf.UInt32Value + 3, // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 4, // 2: 
envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.slow_start_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + 5, // 3: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.locality_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 6, // 4: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.enable_full_scan:type_name -> google.protobuf.BoolValue + 0, // 5: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.selection_method:type_name -> envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() +} +func file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() { + if File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeastRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes, + MessageInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto = out.File + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go new file mode 100644 index 000000000..75a3a2c2e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go @@ -0,0 +1,278 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LeastRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LeastRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LeastRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LeastRequestMultiError, or +// nil if none found. +func (m *LeastRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *LeastRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetChoiceCount(); wrapper != nil { + + if wrapper.GetValue() < 2 { + err := LeastRequestValidationError{ + field: "ChoiceCount", + reason: "value must be greater than or equal to 2", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetActiveRequestBias()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveRequestBias()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnableFullScan()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableFullScan()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := LeastRequest_SelectionMethod_name[int32(m.GetSelectionMethod())]; !ok { + err := LeastRequestValidationError{ + field: "SelectionMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return LeastRequestMultiError(errors) + } + + return nil +} + +// LeastRequestMultiError is an error wrapping multiple validation errors +// returned by LeastRequest.ValidateAll() if the designated constraints aren't met. +type LeastRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LeastRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LeastRequestMultiError) AllErrors() []error { return m } + +// LeastRequestValidationError is the validation error returned by +// LeastRequest.Validate if the designated constraints aren't met. +type LeastRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LeastRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LeastRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LeastRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LeastRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LeastRequestValidationError) ErrorName() string { return "LeastRequestValidationError" } + +// Error satisfies the builtin error interface +func (e LeastRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLeastRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LeastRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LeastRequestValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go new file mode 100644 index 000000000..a7abe001b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go @@ -0,0 +1,196 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LeastRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeastRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LeastRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SelectionMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SelectionMethod)) + i-- + dAtA[i] = 0x30 + } + if m.EnableFullScan != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableFullScan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalityLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.SlowStartConfig != nil { + if vtmsg, ok := interface{}(m.SlowStartConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SlowStartConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeastRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 
+ l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + if size, ok := interface{}(m.SlowStartConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SlowStartConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityLbConfig != nil { + if size, ok := interface{}(m.LocalityLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableFullScan != nil { + l = (*wrapperspb.BoolValue)(m.EnableFullScan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SelectionMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SelectionMethod)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go new file mode 100644 index 000000000..7468da4ac --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This configuration allows the built-in PICK_FIRST LB policy to be configured +// via the LB policy extension point. +type PickFirst struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, instructs the LB policy to shuffle the list of addresses + // received from the name resolver before attempting to connect to them. + ShuffleAddressList bool `protobuf:"varint,1,opt,name=shuffle_address_list,json=shuffleAddressList,proto3" json:"shuffle_address_list,omitempty"` +} + +func (x *PickFirst) Reset() { + *x = PickFirst{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PickFirst) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PickFirst) ProtoMessage() {} + +func (x *PickFirst) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PickFirst.ProtoReflect.Descriptor instead. 
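For context (again, not part of the vendored files), these policy messages are consumed by packing them into a google.protobuf.Any, for example inside a cluster's load_balancing_policy. A minimal packing sketch with a hypothetical function name:

```go
package lbexample // hypothetical consuming package

import (
	pick_firstv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3"
	"google.golang.org/protobuf/types/known/anypb"
)

// packPickFirst wraps a PickFirst policy in an Any; anypb.New records the type URL
// type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst.
func packPickFirst() (*anypb.Any, error) {
	return anypb.New(&pick_firstv3.PickFirst{
		// Shuffle resolved addresses before the first connection attempt.
		ShuffleAddressList: true,
	})
}
```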
+func (*PickFirst) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP(), []int{0} +} + +func (x *PickFirst) GetShuffleAddressList() bool { + if x != nil { + return x.ShuffleAddressList + } + return false +} + +var File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = []byte{ + 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, + 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x3d, 0x0a, 0x09, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x75, + 0x66, 0x66, 0x6c, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0xcc, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x44, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x6a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, + 0x33, 0x3b, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc +) + +func 
file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = []interface{}{ + (*PickFirst)(nil), // 0: envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst +} +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() } +func file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() { + if File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PickFirst); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto = out.File + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go new file mode 100644 index 000000000..d142fed99 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go @@ -0,0 +1,138 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PickFirst with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PickFirst) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PickFirst with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PickFirstMultiError, or nil +// if none found. +func (m *PickFirst) ValidateAll() error { + return m.validate(true) +} + +func (m *PickFirst) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ShuffleAddressList + + if len(errors) > 0 { + return PickFirstMultiError(errors) + } + + return nil +} + +// PickFirstMultiError is an error wrapping multiple validation errors returned +// by PickFirst.ValidateAll() if the designated constraints aren't met. +type PickFirstMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PickFirstMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PickFirstMultiError) AllErrors() []error { return m } + +// PickFirstValidationError is the validation error returned by +// PickFirst.Validate if the designated constraints aren't met. +type PickFirstValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PickFirstValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PickFirstValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PickFirstValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PickFirstValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
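The *_vtproto.pb.go files in this change are guarded by the vtprotobuf build tag, so methods such as MarshalVTStrict only exist when that tag is enabled. A hedged sketch of a caller that prefers the fast path when present and otherwise falls back to the reflection-based marshaller (the helper name is hypothetical):

```go
package lbexample // hypothetical consuming package

import (
	pick_firstv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3"
	"google.golang.org/protobuf/proto"
)

// marshalPolicy uses MarshalVTStrict when the binary is built with -tags vtprotobuf;
// when it is not, the type assertion fails and the standard marshaller runs instead.
func marshalPolicy(p *pick_firstv3.PickFirst) ([]byte, error) {
	if vt, ok := interface{}(p).(interface{ MarshalVTStrict() ([]byte, error) }); ok {
		return vt.MarshalVTStrict()
	}
	return proto.Marshal(p)
}
```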
+func (e PickFirstValidationError) ErrorName() string { return "PickFirstValidationError" } + +// Error satisfies the builtin error interface +func (e PickFirstValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPickFirst.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PickFirstValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PickFirstValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go new file mode 100644 index 000000000..828e70621 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go @@ -0,0 +1,74 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PickFirst) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PickFirst) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PickFirst) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ShuffleAddressList { + i-- + if m.ShuffleAddressList { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PickFirst) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShuffleAddressList { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go new file mode 100644 index 000000000..6c544cc72 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go @@ -0,0 +1,393 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
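The three pick_first files above add the generated message, its protoc-gen-validate hook, and an opt-in vtprotobuf fast path. As a rough, illustrative sketch of how downstream code might exercise them (the package alias and field value are assumptions for the example, not taken from this change; MarshalVTStrict would additionally require the vtprotobuf build tag):

package main

import (
	"fmt"

	pick_firstv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Enable address shuffling before the first pick.
	cfg := &pick_firstv3.PickFirst{ShuffleAddressList: true}

	// PickFirst carries no PGV constraints, so Validate should always return nil.
	if err := cfg.Validate(); err != nil {
		panic(err)
	}

	// Standard protobuf wire encoding via the generated message.
	raw, err := proto.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(raw))
}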
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The hash function used to hash hosts onto the ketama ring. +type RingHash_HashFunction int32 + +const ( + // Currently defaults to XX_HASH. + RingHash_DEFAULT_HASH RingHash_HashFunction = 0 + // Use `xxHash `_. + RingHash_XX_HASH RingHash_HashFunction = 1 + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + RingHash_MURMUR_HASH_2 RingHash_HashFunction = 2 +) + +// Enum value maps for RingHash_HashFunction. +var ( + RingHash_HashFunction_name = map[int32]string{ + 0: "DEFAULT_HASH", + 1: "XX_HASH", + 2: "MURMUR_HASH_2", + } + RingHash_HashFunction_value = map[string]int32{ + "DEFAULT_HASH": 0, + "XX_HASH": 1, + "MURMUR_HASH_2": 2, + } +) + +func (x RingHash_HashFunction) Enum() *RingHash_HashFunction { + p := new(RingHash_HashFunction) + *p = x + return p +} + +func (x RingHash_HashFunction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RingHash_HashFunction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0].Descriptor() +} + +func (RingHash_HashFunction) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0] +} + +func (x RingHash_HashFunction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RingHash_HashFunction.Descriptor instead. +func (RingHash_HashFunction) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0, 0} +} + +// This configuration allows the built-in RING_HASH LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +// [#next-free-field: 8] +type RingHash struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction RingHash_HashFunction `protobuf:"varint,1,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash_HashFunction" json:"hash_function,omitempty"` + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. 
Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,3,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + // + // .. note:: + // + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. + UseHostnameForHashing bool `protobuf:"varint,4,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + // + // .. note:: + // + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + // Common configuration for hashing-based load balancing policies. + ConsistentHashingLbConfig *v3.ConsistentHashingLbConfig `protobuf:"bytes,6,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` + // Enable locality weighted load balancing for ring hash lb explicitly. 
+ LocalityWeightedLbConfig *v3.LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,7,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3" json:"locality_weighted_lb_config,omitempty"` +} + +func (x *RingHash) Reset() { + *x = RingHash{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RingHash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RingHash) ProtoMessage() {} + +func (x *RingHash) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RingHash.ProtoReflect.Descriptor instead. +func (*RingHash) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0} +} + +func (x *RingHash) GetHashFunction() RingHash_HashFunction { + if x != nil { + return x.HashFunction + } + return RingHash_DEFAULT_HASH +} + +func (x *RingHash) GetMinimumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinimumRingSize + } + return nil +} + +func (x *RingHash) GetMaximumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaximumRingSize + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. +func (x *RingHash) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. 
+func (x *RingHash) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +func (x *RingHash) GetConsistentHashingLbConfig() *v3.ConsistentHashingLbConfig { + if x != nil { + return x.ConsistentHashingLbConfig + } + return nil +} + +func (x *RingHash) GetLocalityWeightedLbConfig() *v3.LocalityLbConfig_LocalityWeightedLbConfig { + if x != nil { + return x.LocalityWeightedLbConfig + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = []byte{ + 0x0a, 0x45, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x1a, 0x3f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x06, 0x0a, + 0x08, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x12, 0x7b, 0x0a, 0x0d, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, + 0x68, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 
0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0c, 0xfa, 0x42, 0x09, 0x32, 0x07, 0x18, 0x80, 0x80, 0x80, 0x04, 0x28, 0x01, 0x52, 0x0f, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x8e, 0x01, 0x0a, + 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9c, 0x01, + 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x40, 0x0a, 0x0c, + 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, + 0x55, 0x52, 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x02, 0x42, 0xc8, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x43, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x42, 0x0d, + 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x68, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = []interface{}{ + (RingHash_HashFunction)(0), // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + (*RingHash)(nil), // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash + (*wrapperspb.UInt64Value)(nil), // 2: google.protobuf.UInt64Value + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*v3.ConsistentHashingLbConfig)(nil), // 4: 
envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*v3.LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig +} +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_function:type_name -> envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + 2, // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 2, // 2: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 3, // 3: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 4, // 4: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.consistent_hashing_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + 5, // 5: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.locality_weighted_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() } +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() { + if File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RingHash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes, + MessageInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto = out.File + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go new file mode 100644 index 000000000..c5ec6e39c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go @@ -0,0 +1,252 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RingHash with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RingHash) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RingHash with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RingHashMultiError, or nil +// if none found. +func (m *RingHash) ValidateAll() error { + return m.validate(true) +} + +func (m *RingHash) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RingHash_HashFunction_name[int32(m.GetHashFunction())]; !ok { + err := RingHashValidationError{ + field: "HashFunction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMinimumRingSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 8388608 { + err := RingHashValidationError{ + field: "MinimumRingSize", + reason: "value must be inside range [1, 8388608]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetMaximumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := RingHashValidationError{ + field: "MaximumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := RingHashValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetConsistentHashingLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetConsistentHashingLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RingHashMultiError(errors) + } + + return nil +} + +// RingHashMultiError is an error wrapping multiple validation errors returned +// by RingHash.ValidateAll() if the designated constraints aren't met. +type RingHashMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RingHashMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RingHashMultiError) AllErrors() []error { return m } + +// RingHashValidationError is the validation error returned by +// RingHash.Validate if the designated constraints aren't met. +type RingHashValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RingHashValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RingHashValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RingHashValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RingHashValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RingHashValidationError) ErrorName() string { return "RingHashValidationError" } + +// Error satisfies the builtin error interface +func (e RingHashValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRingHash.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RingHashValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RingHashValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go new file mode 100644 index 000000000..f762ec4c6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go @@ -0,0 +1,191 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RingHash) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RingHash) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RingHash) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalityWeightedLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityWeightedLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.ConsistentHashingLbConfig != nil { + if vtmsg, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConsistentHashingLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + 
copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RingHash) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsistentHashingLbConfig != nil { + if size, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConsistentHashingLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityWeightedLbConfig != nil { + if size, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityWeightedLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go new file mode 100644 index 000000000..7c3741a23 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
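The ring_hash trio above follows the same pattern, and its validation code additionally enforces the ranges documented on the message: ring sizes within [1, 8388608] and hash_balance_factor of at least 100. A minimal sketch of that behavior, assuming the vendored import paths shown in the diff and arbitrary example values:

package main

import (
	"fmt"

	ring_hashv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A configuration that satisfies the generated rules: hash_function is a
	// defined enum value and both ring sizes stay within [1, 8388608].
	cfg := &ring_hashv3.RingHash{
		HashFunction:    ring_hashv3.RingHash_XX_HASH,
		MinimumRingSize: wrapperspb.UInt64(2048),
		MaximumRingSize: wrapperspb.UInt64(1 << 20),
	}
	if err := cfg.ValidateAll(); err != nil {
		panic(err)
	}

	// An out-of-range minimum_ring_size fails validation; ValidateAll wraps the
	// violation in a RingHashMultiError.
	bad := &ring_hashv3.RingHash{MinimumRingSize: wrapperspb.UInt64(0)}
	fmt.Println(bad.ValidateAll()) // value must be inside range [1, 8388608]
}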
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the wrr_locality LB policy. See the :ref:`load balancing architecture overview +// ` for more information. +type WrrLocality struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The child LB policy to create for endpoint-picking within the chosen locality. + EndpointPickingPolicy *v3.LoadBalancingPolicy `protobuf:"bytes,1,opt,name=endpoint_picking_policy,json=endpointPickingPolicy,proto3" json:"endpoint_picking_policy,omitempty"` +} + +func (x *WrrLocality) Reset() { + *x = WrrLocality{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WrrLocality) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WrrLocality) ProtoMessage() {} + +func (x *WrrLocality) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WrrLocality.ProtoReflect.Descriptor instead. 
+func (*WrrLocality) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP(), []int{0} +} + +func (x *WrrLocality) GetEndpointPickingPolicy() *v3.LoadBalancingPolicy { + if x != nil { + return x.EndpointPickingPolicy + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x38, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7d, 0x0a, 0x0b, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x6e, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x15, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xd4, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x0a, 0x46, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x6e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 
0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, + 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x77, 0x72, + 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = []interface{}{ + (*WrrLocality)(nil), // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality + (*v3.LoadBalancingPolicy)(nil), // 1: envoy.config.cluster.v3.LoadBalancingPolicy +} +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality.endpoint_picking_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() } +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() { + if File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WrrLocality); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto = out.File + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go new file mode 100644 index 000000000..c4c33b4f0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go @@ -0,0 +1,176 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *WrrLocality) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WrrLocalityMultiError, or +// nil if none found. 
+func (m *WrrLocality) ValidateAll() error { + return m.validate(true) +} + +func (m *WrrLocality) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetEndpointPickingPolicy() == nil { + err := WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpointPickingPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointPickingPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WrrLocalityMultiError(errors) + } + + return nil +} + +// WrrLocalityMultiError is an error wrapping multiple validation errors +// returned by WrrLocality.ValidateAll() if the designated constraints aren't met. +type WrrLocalityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WrrLocalityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WrrLocalityMultiError) AllErrors() []error { return m } + +// WrrLocalityValidationError is the validation error returned by +// WrrLocality.Validate if the designated constraints aren't met. +type WrrLocalityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WrrLocalityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WrrLocalityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WrrLocalityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WrrLocalityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WrrLocalityValidationError) ErrorName() string { return "WrrLocalityValidationError" } + +// Error satisfies the builtin error interface +func (e WrrLocalityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWrrLocality.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WrrLocalityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WrrLocalityValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go new file mode 100644 index 000000000..1a0148661 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *WrrLocality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WrrLocality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WrrLocality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EndpointPickingPolicy != nil { + if vtmsg, ok := interface{}(m.EndpointPickingPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointPickingPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WrrLocality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointPickingPolicy != nil { + if size, ok := interface{}(m.EndpointPickingPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointPickingPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go new file mode 100644 index 000000000..4199deae5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Custom configuration for the RBAC audit logger that writes log entries +// directly to the operating system's standard output. +// The logger outputs in JSON format and is currently not configurable. +type StdoutAuditLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StdoutAuditLog) Reset() { + *x = StdoutAuditLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StdoutAuditLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StdoutAuditLog) ProtoMessage() {} + +func (x *StdoutAuditLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StdoutAuditLog.ProtoReflect.Descriptor instead. 
+func (*StdoutAuditLog) Descriptor() ([]byte, []int) { + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto protoreflect.FileDescriptor + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2d, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, + 0x64, 0x6f, 0x75, 0x74, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x42, 0xb3, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x3b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, + 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x5d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, + 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce sync.Once + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc +) + +func file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP() []byte { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce.Do(func() { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData) + }) + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData +} + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = []interface{}{ + (*StdoutAuditLog)(nil), // 0: envoy.extensions.rbac.audit_loggers.stream.v3.StdoutAuditLog +} +var 
file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() } +func file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() { + if File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdoutAuditLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes, + DependencyIndexes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs, + MessageInfos: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes, + }.Build() + File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto = out.File + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go new file mode 100644 index 000000000..5fe37d901 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go @@ -0,0 +1,137 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StdoutAuditLog with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StdoutAuditLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StdoutAuditLog with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StdoutAuditLogMultiError, +// or nil if none found. 
+func (m *StdoutAuditLog) ValidateAll() error { + return m.validate(true) +} + +func (m *StdoutAuditLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StdoutAuditLogMultiError(errors) + } + + return nil +} + +// StdoutAuditLogMultiError is an error wrapping multiple validation errors +// returned by StdoutAuditLog.ValidateAll() if the designated constraints +// aren't met. +type StdoutAuditLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StdoutAuditLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StdoutAuditLogMultiError) AllErrors() []error { return m } + +// StdoutAuditLogValidationError is the validation error returned by +// StdoutAuditLog.Validate if the designated constraints aren't met. +type StdoutAuditLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StdoutAuditLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StdoutAuditLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StdoutAuditLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StdoutAuditLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StdoutAuditLogValidationError) ErrorName() string { return "StdoutAuditLogValidationError" } + +// Error satisfies the builtin error interface +func (e StdoutAuditLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStdoutAuditLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StdoutAuditLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StdoutAuditLogValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go new file mode 100644 index 000000000..8e85a5680 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StdoutAuditLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StdoutAuditLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StdoutAuditLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *StdoutAuditLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go new file mode 100644 index 000000000..37df4d7a6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go @@ -0,0 +1,89 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/cert.proto + +package tlsv3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_envoy_extensions_transport_sockets_tls_v3_cert_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x72, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x9e, 0x01, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x42, 0x09, 0x43, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, + 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x50, 0x00, 0x50, 0x01, 0x50, 0x02, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes = []interface{}{} +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_cert_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_cert_proto_init() { + if 
File_envoy_extensions_transport_sockets_tls_v3_cert_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_cert_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go new file mode 100644 index 000000000..aa4f445f5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/cert.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go new file mode 100644 index 000000000..0f7321c22 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -0,0 +1,1689 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TlsParameters_TlsProtocol int32 + +const ( + // Envoy will choose the optimal TLS version. + TlsParameters_TLS_AUTO TlsParameters_TlsProtocol = 0 + // TLS 1.0 + TlsParameters_TLSv1_0 TlsParameters_TlsProtocol = 1 + // TLS 1.1 + TlsParameters_TLSv1_1 TlsParameters_TlsProtocol = 2 + // TLS 1.2 + TlsParameters_TLSv1_2 TlsParameters_TlsProtocol = 3 + // TLS 1.3 + TlsParameters_TLSv1_3 TlsParameters_TlsProtocol = 4 +) + +// Enum value maps for TlsParameters_TlsProtocol. +var ( + TlsParameters_TlsProtocol_name = map[int32]string{ + 0: "TLS_AUTO", + 1: "TLSv1_0", + 2: "TLSv1_1", + 3: "TLSv1_2", + 4: "TLSv1_3", + } + TlsParameters_TlsProtocol_value = map[string]int32{ + "TLS_AUTO": 0, + "TLSv1_0": 1, + "TLSv1_1": 2, + "TLSv1_2": 3, + "TLSv1_3": 4, + } +) + +func (x TlsParameters_TlsProtocol) Enum() *TlsParameters_TlsProtocol { + p := new(TlsParameters_TlsProtocol) + *p = x + return p +} + +func (x TlsParameters_TlsProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TlsParameters_TlsProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[0].Descriptor() +} + +func (TlsParameters_TlsProtocol) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[0] +} + +func (x TlsParameters_TlsProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TlsParameters_TlsProtocol.Descriptor instead. +func (TlsParameters_TlsProtocol) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{0, 0} +} + +// Indicates the choice of GeneralName as defined in section 4.2.1.5 of RFC 5280 to match +// against. +type SubjectAltNameMatcher_SanType int32 + +const ( + SubjectAltNameMatcher_SAN_TYPE_UNSPECIFIED SubjectAltNameMatcher_SanType = 0 + SubjectAltNameMatcher_EMAIL SubjectAltNameMatcher_SanType = 1 + SubjectAltNameMatcher_DNS SubjectAltNameMatcher_SanType = 2 + SubjectAltNameMatcher_URI SubjectAltNameMatcher_SanType = 3 + SubjectAltNameMatcher_IP_ADDRESS SubjectAltNameMatcher_SanType = 4 + SubjectAltNameMatcher_OTHER_NAME SubjectAltNameMatcher_SanType = 5 +) + +// Enum value maps for SubjectAltNameMatcher_SanType. 
+var ( + SubjectAltNameMatcher_SanType_name = map[int32]string{ + 0: "SAN_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "DNS", + 3: "URI", + 4: "IP_ADDRESS", + 5: "OTHER_NAME", + } + SubjectAltNameMatcher_SanType_value = map[string]int32{ + "SAN_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "DNS": 2, + "URI": 3, + "IP_ADDRESS": 4, + "OTHER_NAME": 5, + } +) + +func (x SubjectAltNameMatcher_SanType) Enum() *SubjectAltNameMatcher_SanType { + p := new(SubjectAltNameMatcher_SanType) + *p = x + return p +} + +func (x SubjectAltNameMatcher_SanType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAltNameMatcher_SanType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[1].Descriptor() +} + +func (SubjectAltNameMatcher_SanType) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[1] +} + +func (x SubjectAltNameMatcher_SanType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAltNameMatcher_SanType.Descriptor instead. +func (SubjectAltNameMatcher_SanType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{5, 0} +} + +// Peer certificate verification mode. +type CertificateValidationContext_TrustChainVerification int32 + +const ( + // Perform default certificate verification (e.g., against CA / verification lists) + CertificateValidationContext_VERIFY_TRUST_CHAIN CertificateValidationContext_TrustChainVerification = 0 + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + CertificateValidationContext_ACCEPT_UNTRUSTED CertificateValidationContext_TrustChainVerification = 1 +) + +// Enum value maps for CertificateValidationContext_TrustChainVerification. +var ( + CertificateValidationContext_TrustChainVerification_name = map[int32]string{ + 0: "VERIFY_TRUST_CHAIN", + 1: "ACCEPT_UNTRUSTED", + } + CertificateValidationContext_TrustChainVerification_value = map[string]int32{ + "VERIFY_TRUST_CHAIN": 0, + "ACCEPT_UNTRUSTED": 1, + } +) + +func (x CertificateValidationContext_TrustChainVerification) Enum() *CertificateValidationContext_TrustChainVerification { + p := new(CertificateValidationContext_TrustChainVerification) + *p = x + return p +} + +func (x CertificateValidationContext_TrustChainVerification) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CertificateValidationContext_TrustChainVerification) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[2].Descriptor() +} + +func (CertificateValidationContext_TrustChainVerification) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[2] +} + +func (x CertificateValidationContext_TrustChainVerification) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CertificateValidationContext_TrustChainVerification.Descriptor instead. 
+func (CertificateValidationContext_TrustChainVerification) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} +} + +// [#next-free-field: 6] +type TlsParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Minimum TLS protocol version. By default, it's “TLSv1_2“ for both clients and servers. + // + // TLS protocol versions below TLSv1_2 require setting compatible ciphers with the + // “cipher_suites“ setting as the default ciphers no longer include compatible ciphers. + // + // .. attention:: + // + // Using TLS protocol versions below TLSv1_2 has serious security considerations and risks. + TlsMinimumProtocolVersion TlsParameters_TlsProtocol `protobuf:"varint,1,opt,name=tls_minimum_protocol_version,json=tlsMinimumProtocolVersion,proto3,enum=envoy.extensions.transport_sockets.tls.v3.TlsParameters_TlsProtocol" json:"tls_minimum_protocol_version,omitempty"` + // Maximum TLS protocol version. By default, it's “TLSv1_2“ for clients and “TLSv1_3“ for + // servers. + TlsMaximumProtocolVersion TlsParameters_TlsProtocol `protobuf:"varint,2,opt,name=tls_maximum_protocol_version,json=tlsMaximumProtocolVersion,proto3,enum=envoy.extensions.transport_sockets.tls.v3.TlsParameters_TlsProtocol" json:"tls_maximum_protocol_version,omitempty"` + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). + // + // If not specified, a default list will be used. Defaults are different for server (downstream) and + // client (upstream) TLS configurations. + // Defaults will change over time in response to security considerations; If you care, configure + // it instead of using the default. + // + // In non-FIPS builds, the default server cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In non-FIPS builds, the default client cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + CipherSuites []string `protobuf:"bytes,3,rep,name=cipher_suites,json=cipherSuites,proto3" json:"cipher_suites,omitempty"` + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + EcdhCurves []string `protobuf:"bytes,4,rep,name=ecdh_curves,json=ecdhCurves,proto3" json:"ecdh_curves,omitempty"` + // If specified, the TLS connection will only support the specified signature algorithms. + // The list is ordered by preference. + // If not specified, the default signature algorithms defined by BoringSSL will be used. + // + // Default signature algorithms selected by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // ecdsa_secp256r1_sha256 + // rsa_pss_rsae_sha256 + // rsa_pkcs1_sha256 + // ecdsa_secp384r1_sha384 + // rsa_pss_rsae_sha384 + // rsa_pkcs1_sha384 + // rsa_pss_rsae_sha512 + // rsa_pkcs1_sha512 + // rsa_pkcs1_sha1 + // + // Signature algorithms supported by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // rsa_pkcs1_sha256 + // rsa_pkcs1_sha384 + // rsa_pkcs1_sha512 + // ecdsa_secp256r1_sha256 + // ecdsa_secp384r1_sha384 + // ecdsa_secp521r1_sha512 + // rsa_pss_rsae_sha256 + // rsa_pss_rsae_sha384 + // rsa_pss_rsae_sha512 + // ed25519 + // rsa_pkcs1_sha1 + // ecdsa_sha1 + SignatureAlgorithms []string `protobuf:"bytes,5,rep,name=signature_algorithms,json=signatureAlgorithms,proto3" json:"signature_algorithms,omitempty"` +} + +func (x *TlsParameters) Reset() { + *x = TlsParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsParameters) ProtoMessage() {} + +func (x *TlsParameters) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsParameters.ProtoReflect.Descriptor instead. +func (*TlsParameters) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (x *TlsParameters) GetTlsMinimumProtocolVersion() TlsParameters_TlsProtocol { + if x != nil { + return x.TlsMinimumProtocolVersion + } + return TlsParameters_TLS_AUTO +} + +func (x *TlsParameters) GetTlsMaximumProtocolVersion() TlsParameters_TlsProtocol { + if x != nil { + return x.TlsMaximumProtocolVersion + } + return TlsParameters_TLS_AUTO +} + +func (x *TlsParameters) GetCipherSuites() []string { + if x != nil { + return x.CipherSuites + } + return nil +} + +func (x *TlsParameters) GetEcdhCurves() []string { + if x != nil { + return x.EcdhCurves + } + return nil +} + +func (x *TlsParameters) GetSignatureAlgorithms() []string { + if x != nil { + return x.SignatureAlgorithms + } + return nil +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +type PrivateKeyProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Private key method provider name. The name must match a + // supported private key method provider type. 
+ ProviderName string `protobuf:"bytes,1,opt,name=provider_name,json=providerName,proto3" json:"provider_name,omitempty"` + // Private key method provider specific configuration. + // + // Types that are assignable to ConfigType: + // + // *PrivateKeyProvider_TypedConfig + ConfigType isPrivateKeyProvider_ConfigType `protobuf_oneof:"config_type"` + // If the private key provider isn't available (eg. the required hardware capability doesn't existed), + // Envoy will fallback to the BoringSSL default implementation when the “fallback“ is true. + // The default value is “false“. + Fallback bool `protobuf:"varint,4,opt,name=fallback,proto3" json:"fallback,omitempty"` +} + +func (x *PrivateKeyProvider) Reset() { + *x = PrivateKeyProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrivateKeyProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrivateKeyProvider) ProtoMessage() {} + +func (x *PrivateKeyProvider) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrivateKeyProvider.ProtoReflect.Descriptor instead. +func (*PrivateKeyProvider) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *PrivateKeyProvider) GetProviderName() string { + if x != nil { + return x.ProviderName + } + return "" +} + +func (m *PrivateKeyProvider) GetConfigType() isPrivateKeyProvider_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *PrivateKeyProvider) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*PrivateKeyProvider_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *PrivateKeyProvider) GetFallback() bool { + if x != nil { + return x.Fallback + } + return false +} + +type isPrivateKeyProvider_ConfigType interface { + isPrivateKeyProvider_ConfigType() +} + +type PrivateKeyProvider_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*PrivateKeyProvider_TypedConfig) isPrivateKeyProvider_ConfigType() {} + +// [#next-free-field: 9] +type TlsCertificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS certificate chain. + // + // If “certificate_chain“ is a filesystem path, a watch will be added to the + // parent directory for any file moves to support rotation. This currently + // only applies to dynamic secrets, when the “TlsCertificate“ is delivered via + // SDS. + CertificateChain *v3.DataSource `protobuf:"bytes,1,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The TLS private key. + // + // If “private_key“ is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the “TlsCertificate“ is delivered via SDS. 
+ PrivateKey *v3.DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // “Pkcs12“ data containing TLS certificate, chain, and private key. + // + // If “pkcs12“ is a filesystem path, the file will be read, but no watch will + // be added to the parent directory, since “pkcs12“ isn't used by SDS. + // This field is mutually exclusive with “certificate_chain“, “private_key“ and “private_key_provider“. + // This can't be marked as “oneof“ due to API compatibility reasons. Setting + // both :ref:`private_key `, + // :ref:`certificate_chain `, + // or :ref:`private_key_provider ` + // and :ref:`pkcs12 ` + // fields will result in an error. Use :ref:`password + // ` + // to specify the password to unprotect the “PKCS12“ data, if necessary. + Pkcs12 *v3.DataSource `protobuf:"bytes,8,opt,name=pkcs12,proto3" json:"pkcs12,omitempty"` + // If specified, updates of file-based “certificate_chain“ and “private_key“ + // sources will be triggered by this watch. The certificate/key pair will be + // read together and validated for atomic read consistency (i.e. no + // intervening modification occurred between cert/key read, verified by file + // hash comparisons). This allows explicit control over the path watched, by + // default the parent directories of the filesystem paths in + // “certificate_chain“ and “private_key“ are watched if this field is not + // specified. This only applies when a “TlsCertificate“ is delivered by SDS + // with references to filesystem paths. See the :ref:`SDS key rotation + // ` documentation for further details. + WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,7,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as “oneof“ due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider *PrivateKeyProvider `protobuf:"bytes,6,opt,name=private_key_provider,json=privateKeyProvider,proto3" json:"private_key_provider,omitempty"` + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + Password *v3.DataSource `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via “filename“ or + // “inline_bytes“. The response may pertain to only one certificate. 
+ OcspStaple *v3.DataSource `protobuf:"bytes,4,opt,name=ocsp_staple,json=ocspStaple,proto3" json:"ocsp_staple,omitempty"` + // [#not-implemented-hide:] + SignedCertificateTimestamp []*v3.DataSource `protobuf:"bytes,5,rep,name=signed_certificate_timestamp,json=signedCertificateTimestamp,proto3" json:"signed_certificate_timestamp,omitempty"` +} + +func (x *TlsCertificate) Reset() { + *x = TlsCertificate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsCertificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsCertificate) ProtoMessage() {} + +func (x *TlsCertificate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsCertificate.ProtoReflect.Descriptor instead. +func (*TlsCertificate) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *TlsCertificate) GetCertificateChain() *v3.DataSource { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *TlsCertificate) GetPrivateKey() *v3.DataSource { + if x != nil { + return x.PrivateKey + } + return nil +} + +func (x *TlsCertificate) GetPkcs12() *v3.DataSource { + if x != nil { + return x.Pkcs12 + } + return nil +} + +func (x *TlsCertificate) GetWatchedDirectory() *v3.WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +func (x *TlsCertificate) GetPrivateKeyProvider() *PrivateKeyProvider { + if x != nil { + return x.PrivateKeyProvider + } + return nil +} + +func (x *TlsCertificate) GetPassword() *v3.DataSource { + if x != nil { + return x.Password + } + return nil +} + +func (x *TlsCertificate) GetOcspStaple() *v3.DataSource { + if x != nil { + return x.OcspStaple + } + return nil +} + +func (x *TlsCertificate) GetSignedCertificateTimestamp() []*v3.DataSource { + if x != nil { + return x.SignedCertificateTimestamp + } + return nil +} + +type TlsSessionTicketKeys struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of “openssl rand 80“. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. 
See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + Keys []*v3.DataSource `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` +} + +func (x *TlsSessionTicketKeys) Reset() { + *x = TlsSessionTicketKeys{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsSessionTicketKeys) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsSessionTicketKeys) ProtoMessage() {} + +func (x *TlsSessionTicketKeys) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsSessionTicketKeys.ProtoReflect.Descriptor instead. +func (*TlsSessionTicketKeys) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{3} +} + +func (x *TlsSessionTicketKeys) GetKeys() []*v3.DataSource { + if x != nil { + return x.Keys + } + return nil +} + +// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. +// The plugin instances are defined in the client's bootstrap file. +// The plugin allows certificates to be fetched/refreshed over the network asynchronously with +// respect to the TLS handshake. +// [#not-implemented-hide:] +type CertificateProviderPluginInstance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provider instance name. If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. 
+ CertificateName string `protobuf:"bytes,2,opt,name=certificate_name,json=certificateName,proto3" json:"certificate_name,omitempty"` +} + +func (x *CertificateProviderPluginInstance) Reset() { + *x = CertificateProviderPluginInstance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateProviderPluginInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateProviderPluginInstance) ProtoMessage() {} + +func (x *CertificateProviderPluginInstance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateProviderPluginInstance.ProtoReflect.Descriptor instead. +func (*CertificateProviderPluginInstance) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{4} +} + +func (x *CertificateProviderPluginInstance) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *CertificateProviderPluginInstance) GetCertificateName() string { + if x != nil { + return x.CertificateName + } + return "" +} + +// Matcher for subject alternative names, to match both type and value of the SAN. +type SubjectAltNameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specification of type of SAN. Note that the default enum value is an invalid choice. + SanType SubjectAltNameMatcher_SanType `protobuf:"varint,1,opt,name=san_type,json=sanType,proto3,enum=envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher_SanType" json:"san_type,omitempty"` + // Matcher for SAN value. + // + // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: + // + // - OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") + // - BOOLEAN: Validated against strings "true" or "false" + // - INTEGER/ENUMERATED: Validated against a string containing the integer value + // - NULL: Validated against an empty string + // - Other types: Validated directly against the string value + Matcher *v31.StringMatcher `protobuf:"bytes,2,opt,name=matcher,proto3" json:"matcher,omitempty"` + // OID Value which is required if OTHER_NAME SAN type is used. + // For example, UPN OID is 1.3.6.1.4.1.311.20.2.3 + // (Reference: http://oid-info.com/get/1.3.6.1.4.1.311.20.2.3). + // + // If set for SAN types other than OTHER_NAME, it will be ignored. 
+ Oid string `protobuf:"bytes,3,opt,name=oid,proto3" json:"oid,omitempty"` +} + +func (x *SubjectAltNameMatcher) Reset() { + *x = SubjectAltNameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubjectAltNameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAltNameMatcher) ProtoMessage() {} + +func (x *SubjectAltNameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAltNameMatcher.ProtoReflect.Descriptor instead. +func (*SubjectAltNameMatcher) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{5} +} + +func (x *SubjectAltNameMatcher) GetSanType() SubjectAltNameMatcher_SanType { + if x != nil { + return x.SanType + } + return SubjectAltNameMatcher_SAN_TYPE_UNSPECIFIED +} + +func (x *SubjectAltNameMatcher) GetMatcher() *v31.StringMatcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *SubjectAltNameMatcher) GetOid() string { + if x != nil { + return x.Oid + } + return "" +} + +// [#next-free-field: 18] +type CertificateValidationContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_typed_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. Note + // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be + // provided for all certificate authorities in that chain. Failure to do so will result in + // verification failure for both revoked and unrevoked certificates from that chain. + // The behavior of requiring all certificates to contain CRLs can be altered by + // setting :ref:`only_verify_leaf_cert_crl ` + // true. If set to true, only the final certificate in the chain undergoes CRL verification. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + // + // If “trusted_ca“ is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the “CertificateValidationContext“ is + // delivered via SDS. + // + // X509_V_FLAG_PARTIAL_CHAIN is set by default, so non-root/intermediate ca certificate in “trusted_ca“ + // can be treated as trust anchor as well. It allows verification with building valid partial chain instead + // of a full chain. 
+ // + // If ``ca_certificate_provider_instance`` is set, it takes precedence over ``trusted_ca``. + TrustedCa *v3.DataSource `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"` + // Certificate provider instance for fetching TLS certificates. + // + // If set, takes precedence over ``trusted_ca``. + // [#not-implemented-hide:] + CaCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,13,opt,name=ca_certificate_provider_instance,json=caCertificateProviderInstance,proto3" json:"ca_certificate_provider_instance,omitempty"` + // Use system root certs for validation. + // If present, system root certs are used only if neither of the ``trusted_ca`` + // or ``ca_certificate_provider_instance`` fields are set. + // [#not-implemented-hide:] + SystemRootCerts *CertificateValidationContext_SystemRootCerts `protobuf:"bytes,17,opt,name=system_root_certs,json=systemRootCerts,proto3" json:"system_root_certs,omitempty"` + // If specified, updates of a file-based ``trusted_ca`` source will be triggered + // by this watch. This allows explicit control over the path watched; by + // default, the parent directory of the filesystem path in ``trusted_ca`` is + // watched if this field is not specified. This only applies when a + // ``CertificateValidationContext`` is delivered by SDS with references to + // filesystem paths. See the :ref:`SDS key rotation ` + // documentation for further details. + WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,11,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + VerifyCertificateSpki []string `protobuf:"bytes,3,rep,name=verify_certificate_spki,json=verifyCertificateSpki,proto3" json:"verify_certificate_spki,omitempty"` + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a.
"fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + VerifyCertificateHash []string `protobuf:"bytes,2,rep,name=verify_certificate_hash,json=verifyCertificateHash,proto3" json:"verify_certificate_hash,omitempty"` + // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matchers. + // The matching uses "any" semantics, that is to say, the SAN is verified if at least one matcher is + // matched. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example, if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_typed_subject_alt_names: + // - san_type: DNS + // matcher: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + MatchTypedSubjectAltNames []*SubjectAltNameMatcher `protobuf:"bytes,15,rep,name=match_typed_subject_alt_names,json=matchTypedSubjectAltNames,proto3" json:"match_typed_subject_alt_names,omitempty"` + // This field is deprecated in favor of + // :ref:`match_typed_subject_alt_names + // `. + // Note that if both this field and :ref:`match_typed_subject_alt_names + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/common.proto. + MatchSubjectAltNames []*v31.StringMatcher `protobuf:"bytes,9,rep,name=match_subject_alt_names,json=matchSubjectAltNames,proto3" json:"match_subject_alt_names,omitempty"` + // [#not-implemented-hide:] Must present signed certificate time-stamp. + RequireSignedCertificateTimestamp *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=require_signed_certificate_timestamp,json=requireSignedCertificateTimestamp,proto3" json:"require_signed_certificate_timestamp,omitempty"` + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. Note that if a CRL is provided + // for any certificate authority in a trust chain, a CRL must be provided + // for all certificate authorities in that chain. Failure to do so will + // result in verification failure for both revoked and unrevoked certificates + // from that chain. This default behavior can be altered by setting + // :ref:`only_verify_leaf_cert_crl ` to + // true. + // + // If ``crl`` is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation.
This currently only + // applies to dynamic secrets, when the ``CertificateValidationContext`` is + // delivered via SDS. + Crl *v3.DataSource `protobuf:"bytes,7,opt,name=crl,proto3" json:"crl,omitempty"` + // If specified, Envoy will not reject expired certificates. + AllowExpiredCertificate bool `protobuf:"varint,8,opt,name=allow_expired_certificate,json=allowExpiredCertificate,proto3" json:"allow_expired_certificate,omitempty"` + // Certificate trust chain verification mode. + TrustChainVerification CertificateValidationContext_TrustChainVerification `protobuf:"varint,10,opt,name=trust_chain_verification,json=trustChainVerification,proto3,enum=envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext_TrustChainVerification" json:"trust_chain_verification,omitempty"` + // The configuration of an extension specific certificate validator. + // If specified, all validation is done by the specified validator, + // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). + // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. + // [#extension-category: envoy.tls.cert_validator] + CustomValidatorConfig *v3.TypedExtensionConfig `protobuf:"bytes,12,opt,name=custom_validator_config,json=customValidatorConfig,proto3" json:"custom_validator_config,omitempty"` + // If this option is set to true, only the certificate at the end of the + // certificate chain will be subject to validation by :ref:`CRL `. + OnlyVerifyLeafCertCrl bool `protobuf:"varint,14,opt,name=only_verify_leaf_cert_crl,json=onlyVerifyLeafCertCrl,proto3" json:"only_verify_leaf_cert_crl,omitempty"` + // Defines the maximum depth of a certificate chain accepted in verification; the default limit is 100, though this can be system-dependent. + // This number does not include the leaf but includes the trust anchor, so a depth of 1 allows the leaf and one CA certificate. If a trusted issuer + // appears in the chain, but in a depth larger than configured, the certificate validation will fail. + // This matches the semantics of ``SSL_CTX_set_verify_depth`` in OpenSSL 1.0.x and older versions of BoringSSL. It differs from ``SSL_CTX_set_verify_depth`` + // in OpenSSL 1.1.x and newer versions of BoringSSL in that the trust anchor is included. + // Trusted issuers are specified by setting :ref:`trusted_ca `. + MaxVerifyDepth *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=max_verify_depth,json=maxVerifyDepth,proto3" json:"max_verify_depth,omitempty"` +} + +func (x *CertificateValidationContext) Reset() { + *x = CertificateValidationContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateValidationContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateValidationContext) ProtoMessage() {} + +func (x *CertificateValidationContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateValidationContext.ProtoReflect.Descriptor instead.
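// Illustrative sketch only, not part of the vendored generated file: it shows
// how a caller might populate the CertificateValidationContext fields documented
// above (a trusted CA read from disk, an SPKI pin produced by the openssl
// pipeline in the verify_certificate_spki comment, and an exact DNS SAN matcher
// mirroring the YAML example), using this file's import aliases v3
// (envoy/config/core/v3) and v31 (envoy/type/matcher/v3). The function name,
// file path, and pin value are placeholders.
func exampleSpkiPinnedValidationContext() *CertificateValidationContext {
	return &CertificateValidationContext{
		// CA bundle loaded from the filesystem; rotation of the file can be
		// driven explicitly through the WatchedDirectory field if desired.
		TrustedCa: &v3.DataSource{
			Specifier: &v3.DataSource_Filename{Filename: "/etc/certs/ca.pem"},
		},
		// Base64-encoded SHA-256 of the peer certificate's SPKI.
		VerifyCertificateSpki: []string{"NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A="},
		// Accept only the exact DNS SAN "api.example.com".
		MatchTypedSubjectAltNames: []*SubjectAltNameMatcher{{
			SanType: SubjectAltNameMatcher_DNS,
			Matcher: &v31.StringMatcher{
				MatchPattern: &v31.StringMatcher_Exact{Exact: "api.example.com"},
			},
		}},
	}
}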
+func (*CertificateValidationContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6} +} + +func (x *CertificateValidationContext) GetTrustedCa() *v3.DataSource { + if x != nil { + return x.TrustedCa + } + return nil +} + +func (x *CertificateValidationContext) GetCaCertificateProviderInstance() *CertificateProviderPluginInstance { + if x != nil { + return x.CaCertificateProviderInstance + } + return nil +} + +func (x *CertificateValidationContext) GetSystemRootCerts() *CertificateValidationContext_SystemRootCerts { + if x != nil { + return x.SystemRootCerts + } + return nil +} + +func (x *CertificateValidationContext) GetWatchedDirectory() *v3.WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +func (x *CertificateValidationContext) GetVerifyCertificateSpki() []string { + if x != nil { + return x.VerifyCertificateSpki + } + return nil +} + +func (x *CertificateValidationContext) GetVerifyCertificateHash() []string { + if x != nil { + return x.VerifyCertificateHash + } + return nil +} + +func (x *CertificateValidationContext) GetMatchTypedSubjectAltNames() []*SubjectAltNameMatcher { + if x != nil { + return x.MatchTypedSubjectAltNames + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/common.proto. +func (x *CertificateValidationContext) GetMatchSubjectAltNames() []*v31.StringMatcher { + if x != nil { + return x.MatchSubjectAltNames + } + return nil +} + +func (x *CertificateValidationContext) GetRequireSignedCertificateTimestamp() *wrapperspb.BoolValue { + if x != nil { + return x.RequireSignedCertificateTimestamp + } + return nil +} + +func (x *CertificateValidationContext) GetCrl() *v3.DataSource { + if x != nil { + return x.Crl + } + return nil +} + +func (x *CertificateValidationContext) GetAllowExpiredCertificate() bool { + if x != nil { + return x.AllowExpiredCertificate + } + return false +} + +func (x *CertificateValidationContext) GetTrustChainVerification() CertificateValidationContext_TrustChainVerification { + if x != nil { + return x.TrustChainVerification + } + return CertificateValidationContext_VERIFY_TRUST_CHAIN +} + +func (x *CertificateValidationContext) GetCustomValidatorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomValidatorConfig + } + return nil +} + +func (x *CertificateValidationContext) GetOnlyVerifyLeafCertCrl() bool { + if x != nil { + return x.OnlyVerifyLeafCertCrl + } + return false +} + +func (x *CertificateValidationContext) GetMaxVerifyDepth() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxVerifyDepth + } + return nil +} + +type CertificateValidationContext_SystemRootCerts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CertificateValidationContext_SystemRootCerts) Reset() { + *x = CertificateValidationContext_SystemRootCerts{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateValidationContext_SystemRootCerts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateValidationContext_SystemRootCerts) ProtoMessage() {} + +func (x *CertificateValidationContext_SystemRootCerts) ProtoReflect() protoreflect.Message { + mi := 
&file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateValidationContext_SystemRootCerts.ProtoReflect.Descriptor instead. +func (*CertificateValidationContext_SystemRootCerts) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} +} + +var File_envoy_extensions_transport_sockets_tls_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xa5, 0x04, 0x0a, 0x0d, + 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x8f, 0x01, + 0x0a, 0x1c, 0x74, 0x6c, 0x73, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x54, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x19, 0x74, 0x6c, 0x73, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x8f, 0x01, 0x0a, 0x1c, 0x74, 0x6c, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x19, 0x74, 0x6c, 0x73, 0x4d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x68, 0x5f, 0x63, + 0x75, 0x72, 0x76, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x63, 0x64, + 0x68, 0x43, 0x75, 0x72, 0x76, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x54, 0x6c, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4c, 0x53, + 0x5f, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, + 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, + 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x26, 0x9a, 0xc5, 0x88, + 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x22, 0xeb, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x0d, 
0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, + 0x68, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0xc8, 0x05, 0x0a, 0x0e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, + 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x40, + 0x0a, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, + 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x14, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x52, 0x12, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, + 0x02, 0x01, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x41, 0x0a, 0x0b, + 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x0a, 0x6f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x12, + 0x62, 0x0a, 0x1c, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x1a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, + 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x8b, 0x01, 0x0a, + 0x14, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0xb8, + 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, 0x0a, 0x21, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0xc6, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x61, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, + 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x22, 0x60, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, + 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, + 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, + 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x05, 0x22, 0xa9, 0x0d, 0x0a, 0x1c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 
0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, + 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, + 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, + 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, + 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 
0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, + 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, + 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 
0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, + 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x1a, 0x11, 0x0a, + 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, + 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, + 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, + 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescOnce.Do(func() { + 
file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = []interface{}{ + (TlsParameters_TlsProtocol)(0), // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + (SubjectAltNameMatcher_SanType)(0), // 1: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType + (CertificateValidationContext_TrustChainVerification)(0), // 2: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification + (*TlsParameters)(nil), // 3: envoy.extensions.transport_sockets.tls.v3.TlsParameters + (*PrivateKeyProvider)(nil), // 4: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider + (*TlsCertificate)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*TlsSessionTicketKeys)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*CertificateProviderPluginInstance)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + (*SubjectAltNameMatcher)(nil), // 8: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher + (*CertificateValidationContext)(nil), // 9: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + (*CertificateValidationContext_SystemRootCerts)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + (*anypb.Any)(nil), // 11: google.protobuf.Any + (*v3.DataSource)(nil), // 12: envoy.config.core.v3.DataSource + (*v3.WatchedDirectory)(nil), // 13: envoy.config.core.v3.WatchedDirectory + (*v31.StringMatcher)(nil), // 14: envoy.type.matcher.v3.StringMatcher + (*wrapperspb.BoolValue)(nil), // 15: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 16: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value +} +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_minimum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + 0, // 1: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_maximum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + 11, // 2: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider.typed_config:type_name -> google.protobuf.Any + 12, // 3: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.certificate_chain:type_name -> envoy.config.core.v3.DataSource + 12, // 4: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key:type_name -> envoy.config.core.v3.DataSource + 12, // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.pkcs12:type_name -> envoy.config.core.v3.DataSource + 13, // 6: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 4, // 7: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider + 12, // 8: 
envoy.extensions.transport_sockets.tls.v3.TlsCertificate.password:type_name -> envoy.config.core.v3.DataSource + 12, // 9: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.ocsp_staple:type_name -> envoy.config.core.v3.DataSource + 12, // 10: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.signed_certificate_timestamp:type_name -> envoy.config.core.v3.DataSource + 12, // 11: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys.keys:type_name -> envoy.config.core.v3.DataSource + 1, // 12: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.san_type:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType + 14, // 13: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 12, // 14: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca:type_name -> envoy.config.core.v3.DataSource + 7, // 15: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.ca_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + 10, // 16: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.system_root_certs:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + 13, // 17: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 8, // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_typed_subject_alt_names:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher + 14, // 19: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names:type_name -> envoy.type.matcher.v3.StringMatcher + 15, // 20: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.require_signed_certificate_timestamp:type_name -> google.protobuf.BoolValue + 12, // 21: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.crl:type_name -> envoy.config.core.v3.DataSource + 2, // 22: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trust_chain_verification:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification + 16, // 23: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.custom_validator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 24: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.max_verify_depth:type_name -> google.protobuf.UInt32Value + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrivateKeyProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsCertificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsSessionTicketKeys); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateProviderPluginInstance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubjectAltNameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateValidationContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateValidationContext_SystemRootCerts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PrivateKeyProvider_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs, + EnumInfos: file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_common_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go new file mode 100644 index 000000000..c020641fb --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -0,0 +1,1648 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TlsParameters with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsParameters) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsParameters with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsParametersMultiError, or +// nil if none found. +func (m *TlsParameters) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsParameters) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := TlsParameters_TlsProtocol_name[int32(m.GetTlsMinimumProtocolVersion())]; !ok { + err := TlsParametersValidationError{ + field: "TlsMinimumProtocolVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := TlsParameters_TlsProtocol_name[int32(m.GetTlsMaximumProtocolVersion())]; !ok { + err := TlsParametersValidationError{ + field: "TlsMaximumProtocolVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return TlsParametersMultiError(errors) + } + + return nil +} + +// TlsParametersMultiError is an error wrapping multiple validation errors +// returned by TlsParameters.ValidateAll() if the designated constraints +// aren't met. +type TlsParametersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsParametersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsParametersMultiError) AllErrors() []error { return m } + +// TlsParametersValidationError is the validation error returned by +// TlsParameters.Validate if the designated constraints aren't met. +type TlsParametersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsParametersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsParametersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsParametersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsParametersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TlsParametersValidationError) ErrorName() string { return "TlsParametersValidationError" } + +// Error satisfies the builtin error interface +func (e TlsParametersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsParameters.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsParametersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsParametersValidationError{} + +// Validate checks the field values on PrivateKeyProvider with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PrivateKeyProvider) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PrivateKeyProvider with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PrivateKeyProviderMultiError, or nil if none found. +func (m *PrivateKeyProvider) ValidateAll() error { + return m.validate(true) +} + +func (m *PrivateKeyProvider) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetProviderName()) < 1 { + err := PrivateKeyProviderValidationError{ + field: "ProviderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Fallback + + switch v := m.ConfigType.(type) { + case *PrivateKeyProvider_TypedConfig: + if v == nil { + err := PrivateKeyProviderValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PrivateKeyProviderMultiError(errors) + } + + return nil +} + +// PrivateKeyProviderMultiError is an error wrapping multiple validation errors +// returned by PrivateKeyProvider.ValidateAll() if the designated constraints +// aren't met. +type PrivateKeyProviderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PrivateKeyProviderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m PrivateKeyProviderMultiError) AllErrors() []error { return m } + +// PrivateKeyProviderValidationError is the validation error returned by +// PrivateKeyProvider.Validate if the designated constraints aren't met. +type PrivateKeyProviderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PrivateKeyProviderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PrivateKeyProviderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PrivateKeyProviderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PrivateKeyProviderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PrivateKeyProviderValidationError) ErrorName() string { + return "PrivateKeyProviderValidationError" +} + +// Error satisfies the builtin error interface +func (e PrivateKeyProviderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrivateKeyProvider.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PrivateKeyProviderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PrivateKeyProviderValidationError{} + +// Validate checks the field values on TlsCertificate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsCertificate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsCertificate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsCertificateMultiError, +// or nil if none found. 
+func (m *TlsCertificate) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsCertificate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCertificateChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCertificateChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPkcs12()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPkcs12()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKeyProvider()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKeyProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPassword()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPassword()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOcspStaple()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcspStaple()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSignedCertificateTimestamp() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsCertificateMultiError(errors) + } + + return nil +} + +// TlsCertificateMultiError is an error wrapping multiple validation errors +// returned by TlsCertificate.ValidateAll() if the designated constraints +// aren't met. 
+type TlsCertificateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsCertificateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsCertificateMultiError) AllErrors() []error { return m } + +// TlsCertificateValidationError is the validation error returned by +// TlsCertificate.Validate if the designated constraints aren't met. +type TlsCertificateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsCertificateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsCertificateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsCertificateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsCertificateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TlsCertificateValidationError) ErrorName() string { return "TlsCertificateValidationError" } + +// Error satisfies the builtin error interface +func (e TlsCertificateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsCertificate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsCertificateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsCertificateValidationError{} + +// Validate checks the field values on TlsSessionTicketKeys with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TlsSessionTicketKeys) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsSessionTicketKeys with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TlsSessionTicketKeysMultiError, or nil if none found. 
+func (m *TlsSessionTicketKeys) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsSessionTicketKeys) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetKeys()) < 1 { + err := TlsSessionTicketKeysValidationError{ + field: "Keys", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetKeys() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsSessionTicketKeysMultiError(errors) + } + + return nil +} + +// TlsSessionTicketKeysMultiError is an error wrapping multiple validation +// errors returned by TlsSessionTicketKeys.ValidateAll() if the designated +// constraints aren't met. +type TlsSessionTicketKeysMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsSessionTicketKeysMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsSessionTicketKeysMultiError) AllErrors() []error { return m } + +// TlsSessionTicketKeysValidationError is the validation error returned by +// TlsSessionTicketKeys.Validate if the designated constraints aren't met. +type TlsSessionTicketKeysValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsSessionTicketKeysValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsSessionTicketKeysValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsSessionTicketKeysValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsSessionTicketKeysValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
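Note (not part of the vendored patch): downstream callers typically use Validate for fail-fast checks and ValidateAll to collect every violation, then inspect the typed errors these generated files define. A minimal, hypothetical sketch against the messages added above, assuming the standard go-control-plane import path for this package:

package main

import (
	"errors"
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
)

func main() {
	// TlsSessionTicketKeys requires at least one key, so the empty message fails.
	keys := &tlsv3.TlsSessionTicketKeys{}

	// Validate reports only the first violation.
	if err := keys.Validate(); err != nil {
		var verr tlsv3.TlsSessionTicketKeysValidationError
		if errors.As(err, &verr) {
			fmt.Printf("field %q failed: %s\n", verr.Field(), verr.Reason())
		}
	}

	// ValidateAll gathers every violation into a MultiError.
	if err := keys.ValidateAll(); err != nil {
		var merr tlsv3.TlsSessionTicketKeysMultiError
		if errors.As(err, &merr) {
			for _, e := range merr.AllErrors() {
				fmt.Println("violation:", e)
			}
		}
	}
}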
+func (e TlsSessionTicketKeysValidationError) ErrorName() string { + return "TlsSessionTicketKeysValidationError" +} + +// Error satisfies the builtin error interface +func (e TlsSessionTicketKeysValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsSessionTicketKeys.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsSessionTicketKeysValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsSessionTicketKeysValidationError{} + +// Validate checks the field values on CertificateProviderPluginInstance with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *CertificateProviderPluginInstance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateProviderPluginInstance +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// CertificateProviderPluginInstanceMultiError, or nil if none found. +func (m *CertificateProviderPluginInstance) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateProviderPluginInstance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for InstanceName + + // no validation rules for CertificateName + + if len(errors) > 0 { + return CertificateProviderPluginInstanceMultiError(errors) + } + + return nil +} + +// CertificateProviderPluginInstanceMultiError is an error wrapping multiple +// validation errors returned by +// CertificateProviderPluginInstance.ValidateAll() if the designated +// constraints aren't met. +type CertificateProviderPluginInstanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateProviderPluginInstanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateProviderPluginInstanceMultiError) AllErrors() []error { return m } + +// CertificateProviderPluginInstanceValidationError is the validation error +// returned by CertificateProviderPluginInstance.Validate if the designated +// constraints aren't met. +type CertificateProviderPluginInstanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateProviderPluginInstanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateProviderPluginInstanceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateProviderPluginInstanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateProviderPluginInstanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateProviderPluginInstanceValidationError) ErrorName() string { + return "CertificateProviderPluginInstanceValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateProviderPluginInstanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateProviderPluginInstance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateProviderPluginInstanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateProviderPluginInstanceValidationError{} + +// Validate checks the field values on SubjectAltNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubjectAltNameMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubjectAltNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubjectAltNameMatcherMultiError, or nil if none found. +func (m *SubjectAltNameMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *SubjectAltNameMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _SubjectAltNameMatcher_SanType_NotInLookup[m.GetSanType()]; ok { + err := SubjectAltNameMatcherValidationError{ + field: "SanType", + reason: "value must not be in list [SAN_TYPE_UNSPECIFIED]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := SubjectAltNameMatcher_SanType_name[int32(m.GetSanType())]; !ok { + err := SubjectAltNameMatcherValidationError{ + field: "SanType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMatcher() == nil { + err := SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Oid + + if len(errors) > 0 { + return SubjectAltNameMatcherMultiError(errors) + } + + return nil +} + +// SubjectAltNameMatcherMultiError is an error wrapping multiple validation +// errors returned by SubjectAltNameMatcher.ValidateAll() if the designated +// constraints aren't met. +type SubjectAltNameMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m SubjectAltNameMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubjectAltNameMatcherMultiError) AllErrors() []error { return m } + +// SubjectAltNameMatcherValidationError is the validation error returned by +// SubjectAltNameMatcher.Validate if the designated constraints aren't met. +type SubjectAltNameMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubjectAltNameMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SubjectAltNameMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubjectAltNameMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubjectAltNameMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubjectAltNameMatcherValidationError) ErrorName() string { + return "SubjectAltNameMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e SubjectAltNameMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubjectAltNameMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubjectAltNameMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubjectAltNameMatcherValidationError{} + +var _SubjectAltNameMatcher_SanType_NotInLookup = map[SubjectAltNameMatcher_SanType]struct{}{ + 0: {}, +} + +// Validate checks the field values on CertificateValidationContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateValidationContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateValidationContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateValidationContextMultiError, or nil if none found. 
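Note (illustrative only): the rules above make SanType mandatory (any defined value other than SAN_TYPE_UNSPECIFIED) and require Matcher to be set. A hedged construction sketch, assuming the usual matcher/v3 import alias from go-control-plane:

package main

import (
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

func main() {
	san := &tlsv3.SubjectAltNameMatcher{
		SanType: tlsv3.SubjectAltNameMatcher_DNS, // must not be SAN_TYPE_UNSPECIFIED
		Matcher: &matcherv3.StringMatcher{
			MatchPattern: &matcherv3.StringMatcher_Exact{Exact: "example.com"},
		},
	}
	fmt.Println("valid:", san.Validate() == nil)

	// Clearing the matcher trips the "value is required" rule above.
	san.Matcher = nil
	fmt.Println("after clearing matcher:", san.Validate())
}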
+func (m *CertificateValidationContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateValidationContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTrustedCa()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrustedCa()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCaCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCaCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSystemRootCerts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSystemRootCerts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetVerifyCertificateSpki() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 44 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateSpki[%v]", idx), + reason: "value length must be at least 44 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(item) > 44 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateSpki[%v]", idx), + reason: "value length must be at most 44 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetVerifyCertificateHash() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 64 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateHash[%v]", idx), + reason: "value length must be at least 64 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(item) > 95 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateHash[%v]", idx), + reason: "value length must be at most 95 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetMatchTypedSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetMatchSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRequireSignedCertificateTimestamp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); 
err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireSignedCertificateTimestamp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCrl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCrl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowExpiredCertificate + + if _, ok := CertificateValidationContext_TrustChainVerification_name[int32(m.GetTrustChainVerification())]; !ok { + err := CertificateValidationContextValidationError{ + field: "TrustChainVerification", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCustomValidatorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomValidatorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for OnlyVerifyLeafCertCrl + + if wrapper := m.GetMaxVerifyDepth(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := CertificateValidationContextValidationError{ + field: "MaxVerifyDepth", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CertificateValidationContextMultiError(errors) + } + + return nil +} + +// CertificateValidationContextMultiError is an error wrapping multiple +// validation errors returned by CertificateValidationContext.ValidateAll() if +// the designated constraints aren't met. 
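Note (illustrative, not part of the generated file): the constraints above pin each VerifyCertificateSpki entry to exactly 44 characters because an SPKI pin is the base64-encoded SHA-256 digest of the public key, VerifyCertificateHash entries must be 64 to 95 characters, and MaxVerifyDepth is capped at 100. A small sketch of a context that satisfies those rules, assuming the standard protobuf wrapperspb helper:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A base64-encoded SHA-256 digest is always 44 characters, which is exactly
	// what the VerifyCertificateSpki rule requires.
	digest := sha256.Sum256([]byte("placeholder public key DER"))
	pin := base64.StdEncoding.EncodeToString(digest[:])

	ctx := &tlsv3.CertificateValidationContext{
		VerifyCertificateSpki: []string{pin},
		MaxVerifyDepth:        wrapperspb.UInt32(3), // must be <= 100
	}
	fmt.Println("valid context:", ctx.Validate()) // <nil>

	// A too-short pin violates the length rule and is reported per index.
	ctx.VerifyCertificateSpki = append(ctx.VerifyCertificateSpki, "not-a-pin")
	fmt.Println("invalid context:", ctx.ValidateAll())
}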
+type CertificateValidationContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateValidationContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateValidationContextMultiError) AllErrors() []error { return m } + +// CertificateValidationContextValidationError is the validation error returned +// by CertificateValidationContext.Validate if the designated constraints +// aren't met. +type CertificateValidationContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateValidationContextValidationError) ErrorName() string { + return "CertificateValidationContextValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateValidationContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateValidationContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationContextValidationError{} + +// Validate checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CertificateValidationContext_SystemRootCerts) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CertificateValidationContext_SystemRootCertsMultiError, or nil if none found. +func (m *CertificateValidationContext_SystemRootCerts) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateValidationContext_SystemRootCerts) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return CertificateValidationContext_SystemRootCertsMultiError(errors) + } + + return nil +} + +// CertificateValidationContext_SystemRootCertsMultiError is an error wrapping +// multiple validation errors returned by +// CertificateValidationContext_SystemRootCerts.ValidateAll() if the +// designated constraints aren't met. +type CertificateValidationContext_SystemRootCertsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m CertificateValidationContext_SystemRootCertsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateValidationContext_SystemRootCertsMultiError) AllErrors() []error { return m } + +// CertificateValidationContext_SystemRootCertsValidationError is the +// validation error returned by +// CertificateValidationContext_SystemRootCerts.Validate if the designated +// constraints aren't met. +type CertificateValidationContext_SystemRootCertsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateValidationContext_SystemRootCertsValidationError) ErrorName() string { + return "CertificateValidationContext_SystemRootCertsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateValidationContext_SystemRootCertsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateValidationContext_SystemRootCerts.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationContext_SystemRootCertsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationContext_SystemRootCertsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go new file mode 100644 index 000000000..00d5573d9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go @@ -0,0 +1,1155 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TlsParameters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsParameters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsParameters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SignatureAlgorithms) > 0 { + for iNdEx := len(m.SignatureAlgorithms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SignatureAlgorithms[iNdEx]) + copy(dAtA[i:], m.SignatureAlgorithms[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SignatureAlgorithms[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.EcdhCurves) > 0 { + for iNdEx := len(m.EcdhCurves) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EcdhCurves[iNdEx]) + copy(dAtA[i:], m.EcdhCurves[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EcdhCurves[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CipherSuites) > 0 { + for iNdEx := len(m.CipherSuites) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CipherSuites[iNdEx]) + copy(dAtA[i:], m.CipherSuites[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CipherSuites[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.TlsMaximumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMaximumProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.TlsMinimumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMinimumProtocolVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrivateKeyProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Fallback { + i-- + if m.Fallback { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.ConfigType.(*PrivateKeyProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ProviderName) > 0 { + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { 
+ return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TlsCertificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Pkcs12 != nil { + if vtmsg, ok := interface{}(m.Pkcs12).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Pkcs12) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.PrivateKeyProvider != nil { + size, err := m.PrivateKeyProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.SignedCertificateTimestamp) > 0 { + for iNdEx := len(m.SignedCertificateTimestamp) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SignedCertificateTimestamp[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SignedCertificateTimestamp[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.OcspStaple != nil { + if vtmsg, ok := interface{}(m.OcspStaple).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcspStaple) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Password != nil { + if vtmsg, ok := 
interface{}(m.Password).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Password) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + if vtmsg, ok := interface{}(m.PrivateKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrivateKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.CertificateChain != nil { + if vtmsg, ok := interface{}(m.CertificateChain).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CertificateChain) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsSessionTicketKeys) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsSessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsSessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Keys[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Keys[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateProviderPluginInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateProviderPluginInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateProviderPluginInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAltNameMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAltNameMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAltNameMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Oid) > 0 { + i -= len(m.Oid) + copy(dAtA[i:], m.Oid) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Oid))) + i-- + dAtA[i] = 0x1a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SanType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SanType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, 
nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SystemRootCerts != nil { + size, err := m.SystemRootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.MaxVerifyDepth != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for iNdEx := len(m.MatchTypedSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.MatchTypedSubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.OnlyVerifyLeafCertCrl { + i-- + if m.OnlyVerifyLeafCertCrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.CaCertificateProviderInstance != nil { + size, err := m.CaCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CustomValidatorConfig != nil { + if vtmsg, ok := interface{}(m.CustomValidatorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomValidatorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.TrustChainVerification != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrustChainVerification)) + i-- + dAtA[i] = 0x50 + } + if len(m.MatchSubjectAltNames) > 0 { + for iNdEx := len(m.MatchSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.MatchSubjectAltNames[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MatchSubjectAltNames[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if m.AllowExpiredCertificate { + i-- + if m.AllowExpiredCertificate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Crl != nil { + if 
vtmsg, ok := interface{}(m.Crl).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Crl) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.RequireSignedCertificateTimestamp != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.VerifyCertificateSpki) > 0 { + for iNdEx := len(m.VerifyCertificateSpki) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateSpki[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateSpki[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateSpki[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VerifyCertificateHash) > 0 { + for iNdEx := len(m.VerifyCertificateHash) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateHash[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateHash[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateHash[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TrustedCa != nil { + if vtmsg, ok := interface{}(m.TrustedCa).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustedCa) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsParameters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsMinimumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMinimumProtocolVersion)) + } + if m.TlsMaximumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMaximumProtocolVersion)) + } + if len(m.CipherSuites) > 0 { + for _, s := range m.CipherSuites { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.EcdhCurves) > 0 { + for _, s := range m.EcdhCurves { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SignatureAlgorithms) > 0 { + for _, s := range m.SignatureAlgorithms { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProviderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Fallback { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.CertificateChain != nil { + if size, ok := interface{}(m.CertificateChain).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CertificateChain) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + if size, ok := interface{}(m.PrivateKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrivateKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Password != nil { + if size, ok := interface{}(m.Password).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Password) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaple != nil { + if size, ok := interface{}(m.OcspStaple).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcspStaple) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SignedCertificateTimestamp) > 0 { + for _, e := range m.SignedCertificateTimestamp { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PrivateKeyProvider != nil { + l = m.PrivateKeyProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Pkcs12 != nil { + if size, ok := interface{}(m.Pkcs12).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Pkcs12) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TlsSessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, e := range m.Keys { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateProviderPluginInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAltNameMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SanType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SanType)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Oid) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext_SystemRootCerts) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TrustedCa != nil { + if size, ok := interface{}(m.TrustedCa).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TrustedCa) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VerifyCertificateHash) > 0 { + for _, s := range m.VerifyCertificateHash { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.VerifyCertificateSpki) > 0 { + for _, s := range m.VerifyCertificateSpki { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequireSignedCertificateTimestamp != nil { + l = (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Crl != nil { + if size, ok := interface{}(m.Crl).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Crl) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExpiredCertificate { + n += 2 + } + if len(m.MatchSubjectAltNames) > 0 { + for _, e := range m.MatchSubjectAltNames { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TrustChainVerification != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TrustChainVerification)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomValidatorConfig != nil { + if size, ok := interface{}(m.CustomValidatorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomValidatorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CaCertificateProviderInstance != nil { + l = m.CaCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnlyVerifyLeafCertCrl { + n += 2 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for _, e := range m.MatchTypedSubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxVerifyDepth != nil { + l = (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SystemRootCerts != nil { + l = m.SystemRootCerts.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go new file mode 100644 index 000000000..cf919ed97 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GenericSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Secret of generic type and is available to filters. + Secret *v3.DataSource `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` +} + +func (x *GenericSecret) Reset() { + *x = GenericSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenericSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenericSecret) ProtoMessage() {} + +func (x *GenericSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenericSecret.ProtoReflect.Descriptor instead. +func (*GenericSecret) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{0} +} + +func (x *GenericSecret) GetSecret() *v3.DataSource { + if x != nil { + return x.Secret + } + return nil +} + +type SdsSecretConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name by which the secret can be uniquely referred to. When both name and config are specified, + // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret + // will be loaded from static resources. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + SdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=sds_config,json=sdsConfig,proto3" json:"sds_config,omitempty"` +} + +func (x *SdsSecretConfig) Reset() { + *x = SdsSecretConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SdsSecretConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SdsSecretConfig) ProtoMessage() {} + +func (x *SdsSecretConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SdsSecretConfig.ProtoReflect.Descriptor instead. +func (*SdsSecretConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{1} +} + +func (x *SdsSecretConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SdsSecretConfig) GetSdsConfig() *v3.ConfigSource { + if x != nil { + return x.SdsConfig + } + return nil +} + +// [#next-free-field: 6] +type Secret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to Type: + // + // *Secret_TlsCertificate + // *Secret_SessionTicketKeys + // *Secret_ValidationContext + // *Secret_GenericSecret + Type isSecret_Type `protobuf_oneof:"type"` +} + +func (x *Secret) Reset() { + *x = Secret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Secret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Secret) ProtoMessage() {} + +func (x *Secret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Secret.ProtoReflect.Descriptor instead. +func (*Secret) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{2} +} + +func (x *Secret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Secret) GetType() isSecret_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *Secret) GetTlsCertificate() *TlsCertificate { + if x, ok := x.GetType().(*Secret_TlsCertificate); ok { + return x.TlsCertificate + } + return nil +} + +func (x *Secret) GetSessionTicketKeys() *TlsSessionTicketKeys { + if x, ok := x.GetType().(*Secret_SessionTicketKeys); ok { + return x.SessionTicketKeys + } + return nil +} + +func (x *Secret) GetValidationContext() *CertificateValidationContext { + if x, ok := x.GetType().(*Secret_ValidationContext); ok { + return x.ValidationContext + } + return nil +} + +func (x *Secret) GetGenericSecret() *GenericSecret { + if x, ok := x.GetType().(*Secret_GenericSecret); ok { + return x.GenericSecret + } + return nil +} + +type isSecret_Type interface { + isSecret_Type() +} + +type Secret_TlsCertificate struct { + TlsCertificate *TlsCertificate `protobuf:"bytes,2,opt,name=tls_certificate,json=tlsCertificate,proto3,oneof"` +} + +type Secret_SessionTicketKeys struct { + SessionTicketKeys *TlsSessionTicketKeys `protobuf:"bytes,3,opt,name=session_ticket_keys,json=sessionTicketKeys,proto3,oneof"` +} + +type Secret_ValidationContext struct { + ValidationContext *CertificateValidationContext `protobuf:"bytes,4,opt,name=validation_context,json=validationContext,proto3,oneof"` +} + +type Secret_GenericSecret struct { + GenericSecret *GenericSecret `protobuf:"bytes,5,opt,name=generic_secret,json=genericSecret,proto3,oneof"` +} + +func (*Secret_TlsCertificate) isSecret_Type() {} + +func (*Secret_SessionTicketKeys) isSecret_Type() {} + +func (*Secret_ValidationContext) isSecret_Type() {} + +func (*Secret_GenericSecret) isSecret_Type() {} + +var File_envoy_extensions_transport_sockets_tls_v3_secret_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x9b, + 0x01, 0x0a, 0x0f, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x41, 0x0a, 0x0a, 0x73, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x73, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x53, 0x64, 0x73, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xfb, 0x03, 0x0a, + 0x06, 
0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x0f, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x12, 0x71, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x48, + 0x00, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x61, + 0x0a, 0x0e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x3a, 0x1f, 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x65, 0x63, 0x72, 0x65, 
0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescOnce.Do(func() { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes = []interface{}{ + (*GenericSecret)(nil), // 0: envoy.extensions.transport_sockets.tls.v3.GenericSecret + (*SdsSecretConfig)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + (*Secret)(nil), // 2: envoy.extensions.transport_sockets.tls.v3.Secret + (*v3.DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*v3.ConfigSource)(nil), // 4: envoy.config.core.v3.ConfigSource + (*TlsCertificate)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*TlsSessionTicketKeys)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*CertificateValidationContext)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +} +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs = []int32{ + 3, // 0: envoy.extensions.transport_sockets.tls.v3.GenericSecret.secret:type_name -> envoy.config.core.v3.DataSource + 4, // 1: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig.sds_config:type_name -> envoy.config.core.v3.ConfigSource + 5, // 2: envoy.extensions.transport_sockets.tls.v3.Secret.tls_certificate:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate + 6, // 3: envoy.extensions.transport_sockets.tls.v3.Secret.session_ticket_keys:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + 7, // 4: envoy.extensions.transport_sockets.tls.v3.Secret.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 0, // 5: envoy.extensions.transport_sockets.tls.v3.Secret.generic_secret:type_name -> envoy.extensions.transport_sockets.tls.v3.GenericSecret + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() } +func 
file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_secret_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenericSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SdsSecretConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Secret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Secret_TlsCertificate)(nil), + (*Secret_SessionTicketKeys)(nil), + (*Secret_ValidationContext)(nil), + (*Secret_GenericSecret)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_secret_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go new file mode 100644 index 000000000..913c54922 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go @@ -0,0 +1,575 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GenericSecret with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *GenericSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GenericSecret with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GenericSecretMultiError, or +// nil if none found. +func (m *GenericSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *GenericSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GenericSecretMultiError(errors) + } + + return nil +} + +// GenericSecretMultiError is an error wrapping multiple validation errors +// returned by GenericSecret.ValidateAll() if the designated constraints +// aren't met. +type GenericSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GenericSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GenericSecretMultiError) AllErrors() []error { return m } + +// GenericSecretValidationError is the validation error returned by +// GenericSecret.Validate if the designated constraints aren't met. +type GenericSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GenericSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GenericSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GenericSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GenericSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GenericSecretValidationError) ErrorName() string { return "GenericSecretValidationError" } + +// Error satisfies the builtin error interface +func (e GenericSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGenericSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GenericSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GenericSecretValidationError{} + +// Validate checks the field values on SdsSecretConfig with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SdsSecretConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SdsSecretConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SdsSecretConfigMultiError, or nil if none found. +func (m *SdsSecretConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SdsSecretConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := SdsSecretConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SdsSecretConfigMultiError(errors) + } + + return nil +} + +// SdsSecretConfigMultiError is an error wrapping multiple validation errors +// returned by SdsSecretConfig.ValidateAll() if the designated constraints +// aren't met. +type SdsSecretConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SdsSecretConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SdsSecretConfigMultiError) AllErrors() []error { return m } + +// SdsSecretConfigValidationError is the validation error returned by +// SdsSecretConfig.Validate if the designated constraints aren't met. +type SdsSecretConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SdsSecretConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SdsSecretConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SdsSecretConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SdsSecretConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SdsSecretConfigValidationError) ErrorName() string { return "SdsSecretConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SdsSecretConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSdsSecretConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SdsSecretConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SdsSecretConfigValidationError{} + +// Validate checks the field values on Secret with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Secret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Secret with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in SecretMultiError, or nil if none found. +func (m *Secret) ValidateAll() error { + return m.validate(true) +} + +func (m *Secret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.Type.(type) { + case *Secret_TlsCertificate: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTlsCertificate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_SessionTicketKeys: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSessionTicketKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_ValidationContext: + if v == nil { + err := SecretValidationError{ + field: 
"Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_GenericSecret: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGenericSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenericSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SecretMultiError(errors) + } + + return nil +} + +// SecretMultiError is an error wrapping multiple validation errors returned by +// Secret.ValidateAll() if the designated constraints aren't met. +type SecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretMultiError) AllErrors() []error { return m } + +// SecretValidationError is the validation error returned by Secret.Validate if +// the designated constraints aren't met. +type SecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SecretValidationError) ErrorName() string { return "SecretValidationError" } + +// Error satisfies the builtin error interface +func (e SecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go new file mode 100644 index 000000000..35e8a3ce2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go @@ -0,0 +1,415 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GenericSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + if vtmsg, ok := interface{}(m.Secret).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secret) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SdsSecretConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SdsConfig != nil { + if vtmsg, ok := interface{}(m.SdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*Secret_GenericSecret); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_TlsCertificate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret_TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TlsCertificate != nil { + size, err := m.TlsCertificate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Secret_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m 
*Secret_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Secret_GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericSecret != nil { + size, err := m.GenericSecret.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + if size, ok := interface{}(m.Secret).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Secret) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SdsConfig != nil { + if size, ok := interface{}(m.SdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Secret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Secret_TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsCertificate != nil { + l = m.TlsCertificate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericSecret != nil { + l = m.GenericSecret.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go new file mode 100644 index 000000000..db43da674 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go @@ -0,0 +1,1545 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DownstreamTlsContext_OcspStaplePolicy int32 + +const ( + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + DownstreamTlsContext_LENIENT_STAPLING DownstreamTlsContext_OcspStaplePolicy = 0 + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + DownstreamTlsContext_STRICT_STAPLING DownstreamTlsContext_OcspStaplePolicy = 1 + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used connections. If no suitable certificate is found, the + // connection is rejected. + DownstreamTlsContext_MUST_STAPLE DownstreamTlsContext_OcspStaplePolicy = 2 +) + +// Enum value maps for DownstreamTlsContext_OcspStaplePolicy. 
+var ( + DownstreamTlsContext_OcspStaplePolicy_name = map[int32]string{ + 0: "LENIENT_STAPLING", + 1: "STRICT_STAPLING", + 2: "MUST_STAPLE", + } + DownstreamTlsContext_OcspStaplePolicy_value = map[string]int32{ + "LENIENT_STAPLING": 0, + "STRICT_STAPLING": 1, + "MUST_STAPLE": 2, + } +) + +func (x DownstreamTlsContext_OcspStaplePolicy) Enum() *DownstreamTlsContext_OcspStaplePolicy { + p := new(DownstreamTlsContext_OcspStaplePolicy) + *p = x + return p +} + +func (x DownstreamTlsContext_OcspStaplePolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DownstreamTlsContext_OcspStaplePolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes[0].Descriptor() +} + +func (DownstreamTlsContext_OcspStaplePolicy) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes[0] +} + +func (x DownstreamTlsContext_OcspStaplePolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DownstreamTlsContext_OcspStaplePolicy.Descriptor instead. +func (DownstreamTlsContext_OcspStaplePolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{1, 0} +} + +// [#next-free-field: 6] +type UpstreamTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext *CommonTlsContext `protobuf:"bytes,1,opt,name=common_tls_context,json=commonTlsContext,proto3" json:"common_tls_context,omitempty"` + // SNI string to use when creating TLS backend connections. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + AllowRenegotiation bool `protobuf:"varint,3,opt,name=allow_renegotiation,json=allowRenegotiation,proto3" json:"allow_renegotiation,omitempty"` + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + MaxSessionKeys *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_session_keys,json=maxSessionKeys,proto3" json:"max_session_keys,omitempty"` + // This field is used to control the enforcement, whereby the handshake will fail if the keyUsage extension + // is present and incompatible with the TLS usage. Currently, the default value is false (i.e., enforcement off) + // but it is expected to be changed to true by default in a future release. + // “ssl.was_key_usage_invalid“ in :ref:`listener metrics ` will be set for certificate + // configurations that would fail if this option were set to true. 
+ EnforceRsaKeyUsage *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enforce_rsa_key_usage,json=enforceRsaKeyUsage,proto3" json:"enforce_rsa_key_usage,omitempty"` +} + +func (x *UpstreamTlsContext) Reset() { + *x = UpstreamTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamTlsContext) ProtoMessage() {} + +func (x *UpstreamTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamTlsContext.ProtoReflect.Descriptor instead. +func (*UpstreamTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{0} +} + +func (x *UpstreamTlsContext) GetCommonTlsContext() *CommonTlsContext { + if x != nil { + return x.CommonTlsContext + } + return nil +} + +func (x *UpstreamTlsContext) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +func (x *UpstreamTlsContext) GetAllowRenegotiation() bool { + if x != nil { + return x.AllowRenegotiation + } + return false +} + +func (x *UpstreamTlsContext) GetMaxSessionKeys() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxSessionKeys + } + return nil +} + +func (x *UpstreamTlsContext) GetEnforceRsaKeyUsage() *wrapperspb.BoolValue { + if x != nil { + return x.EnforceRsaKeyUsage + } + return nil +} + +// [#next-free-field: 12] +type DownstreamTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common TLS context settings. + CommonTlsContext *CommonTlsContext `protobuf:"bytes,1,opt,name=common_tls_context,json=commonTlsContext,proto3" json:"common_tls_context,omitempty"` + // If specified, Envoy will reject connections without a valid client + // certificate. + RequireClientCertificate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=require_client_certificate,json=requireClientCertificate,proto3" json:"require_client_certificate,omitempty"` + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + RequireSni *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=require_sni,json=requireSni,proto3" json:"require_sni,omitempty"` + // Types that are assignable to SessionTicketKeysType: + // + // *DownstreamTlsContext_SessionTicketKeys + // *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig + // *DownstreamTlsContext_DisableStatelessSessionResumption + SessionTicketKeysType isDownstreamTlsContext_SessionTicketKeysType `protobuf_oneof:"session_ticket_keys_type"` + // If set to true, the TLS server will not maintain a session cache of TLS sessions. (This is + // relevant only for TLSv1.2 and earlier.) + DisableStatefulSessionResumption bool `protobuf:"varint,10,opt,name=disable_stateful_session_resumption,json=disableStatefulSessionResumption,proto3" json:"disable_stateful_session_resumption,omitempty"` + // If specified, “session_timeout“ will change the maximum lifetime (in seconds) of the TLS session. 
+ // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only seconds can be specified (fractional seconds are ignored). + SessionTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_timeout,json=sessionTimeout,proto3" json:"session_timeout,omitempty"` + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy DownstreamTlsContext_OcspStaplePolicy `protobuf:"varint,8,opt,name=ocsp_staple_policy,json=ocspStaplePolicy,proto3,enum=envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext_OcspStaplePolicy" json:"ocsp_staple_policy,omitempty"` + // Multiple certificates are allowed in Downstream transport socket to serve different SNI. + // If the client provides SNI but no such cert matched, it will decide to full scan certificates or not based on this config. + // Defaults to false. See more details in :ref:`Multiple TLS certificates `. + FullScanCertsOnSniMismatch *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=full_scan_certs_on_sni_mismatch,json=fullScanCertsOnSniMismatch,proto3" json:"full_scan_certs_on_sni_mismatch,omitempty"` + // By default, Envoy as a server uses its preferred cipher during the handshake. + // Setting this to true would allow the downstream client's preferred cipher to be used instead. + // Has no effect when using TLSv1_3. + PreferClientCiphers bool `protobuf:"varint,11,opt,name=prefer_client_ciphers,json=preferClientCiphers,proto3" json:"prefer_client_ciphers,omitempty"` +} + +func (x *DownstreamTlsContext) Reset() { + *x = DownstreamTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownstreamTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownstreamTlsContext) ProtoMessage() {} + +func (x *DownstreamTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownstreamTlsContext.ProtoReflect.Descriptor instead. 
+func (*DownstreamTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{1} +} + +func (x *DownstreamTlsContext) GetCommonTlsContext() *CommonTlsContext { + if x != nil { + return x.CommonTlsContext + } + return nil +} + +func (x *DownstreamTlsContext) GetRequireClientCertificate() *wrapperspb.BoolValue { + if x != nil { + return x.RequireClientCertificate + } + return nil +} + +func (x *DownstreamTlsContext) GetRequireSni() *wrapperspb.BoolValue { + if x != nil { + return x.RequireSni + } + return nil +} + +func (m *DownstreamTlsContext) GetSessionTicketKeysType() isDownstreamTlsContext_SessionTicketKeysType { + if m != nil { + return m.SessionTicketKeysType + } + return nil +} + +func (x *DownstreamTlsContext) GetSessionTicketKeys() *TlsSessionTicketKeys { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_SessionTicketKeys); ok { + return x.SessionTicketKeys + } + return nil +} + +func (x *DownstreamTlsContext) GetSessionTicketKeysSdsSecretConfig() *SdsSecretConfig { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig); ok { + return x.SessionTicketKeysSdsSecretConfig + } + return nil +} + +func (x *DownstreamTlsContext) GetDisableStatelessSessionResumption() bool { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_DisableStatelessSessionResumption); ok { + return x.DisableStatelessSessionResumption + } + return false +} + +func (x *DownstreamTlsContext) GetDisableStatefulSessionResumption() bool { + if x != nil { + return x.DisableStatefulSessionResumption + } + return false +} + +func (x *DownstreamTlsContext) GetSessionTimeout() *durationpb.Duration { + if x != nil { + return x.SessionTimeout + } + return nil +} + +func (x *DownstreamTlsContext) GetOcspStaplePolicy() DownstreamTlsContext_OcspStaplePolicy { + if x != nil { + return x.OcspStaplePolicy + } + return DownstreamTlsContext_LENIENT_STAPLING +} + +func (x *DownstreamTlsContext) GetFullScanCertsOnSniMismatch() *wrapperspb.BoolValue { + if x != nil { + return x.FullScanCertsOnSniMismatch + } + return nil +} + +func (x *DownstreamTlsContext) GetPreferClientCiphers() bool { + if x != nil { + return x.PreferClientCiphers + } + return false +} + +type isDownstreamTlsContext_SessionTicketKeysType interface { + isDownstreamTlsContext_SessionTicketKeysType() +} + +type DownstreamTlsContext_SessionTicketKeys struct { + // TLS session ticket key settings. + SessionTicketKeys *TlsSessionTicketKeys `protobuf:"bytes,4,opt,name=session_ticket_keys,json=sessionTicketKeys,proto3,oneof"` +} + +type DownstreamTlsContext_SessionTicketKeysSdsSecretConfig struct { + // Config for fetching TLS session ticket keys via SDS API. + SessionTicketKeysSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,5,opt,name=session_ticket_keys_sds_secret_config,json=sessionTicketKeysSdsSecretConfig,proto3,oneof"` +} + +type DownstreamTlsContext_DisableStatelessSessionResumption struct { + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. 
+ // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + DisableStatelessSessionResumption bool `protobuf:"varint,7,opt,name=disable_stateless_session_resumption,json=disableStatelessSessionResumption,proto3,oneof"` +} + +func (*DownstreamTlsContext_SessionTicketKeys) isDownstreamTlsContext_SessionTicketKeysType() {} + +func (*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) isDownstreamTlsContext_SessionTicketKeysType() { +} + +func (*DownstreamTlsContext_DisableStatelessSessionResumption) isDownstreamTlsContext_SessionTicketKeysType() { +} + +// TLS key log configuration. +// The key log file format is "format used by NSS for its SSLKEYLOGFILE debugging output" (text taken from openssl man page) +type TlsKeyLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path to save the TLS key log. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // The local IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any local IP address will be matched. + LocalAddressRange []*v3.CidrRange `protobuf:"bytes,2,rep,name=local_address_range,json=localAddressRange,proto3" json:"local_address_range,omitempty"` + // The remote IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any remote IP address will be matched. + RemoteAddressRange []*v3.CidrRange `protobuf:"bytes,3,rep,name=remote_address_range,json=remoteAddressRange,proto3" json:"remote_address_range,omitempty"` +} + +func (x *TlsKeyLog) Reset() { + *x = TlsKeyLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsKeyLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsKeyLog) ProtoMessage() {} + +func (x *TlsKeyLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsKeyLog.ProtoReflect.Descriptor instead. +func (*TlsKeyLog) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{2} +} + +func (x *TlsKeyLog) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *TlsKeyLog) GetLocalAddressRange() []*v3.CidrRange { + if x != nil { + return x.LocalAddressRange + } + return nil +} + +func (x *TlsKeyLog) GetRemoteAddressRange() []*v3.CidrRange { + if x != nil { + return x.RemoteAddressRange + } + return nil +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 17] +type CommonTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TLS protocol versions, cipher suites etc. 
+ TlsParams *TlsParameters `protobuf:"bytes,1,opt,name=tls_params,json=tlsParams,proto3" json:"tls_params,omitempty"` + // Only a single TLS certificate is supported in client contexts. In server contexts, + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates and support SNI-based selection. + // + // If “tls_certificate_provider_instance“ is set, this field is ignored. + // If this field is set, “tls_certificate_sds_secret_configs“ is ignored. + TlsCertificates []*TlsCertificate `protobuf:"bytes,2,rep,name=tls_certificates,json=tlsCertificates,proto3" json:"tls_certificates,omitempty"` + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // + // The same number and types of certificates as :ref:`tls_certificates ` + // are valid in the the certificates fetched through this setting. + // + // If “tls_certificates“ or “tls_certificate_provider_instance“ are set, this field + // is ignored. + TlsCertificateSdsSecretConfigs []*SdsSecretConfig `protobuf:"bytes,6,rep,name=tls_certificate_sds_secret_configs,json=tlsCertificateSdsSecretConfigs,proto3" json:"tls_certificate_sds_secret_configs,omitempty"` + // Certificate provider instance for fetching TLS certs. + // + // If this field is set, “tls_certificates“ and “tls_certificate_provider_instance“ + // are ignored. + // [#not-implemented-hide:] + TlsCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,14,opt,name=tls_certificate_provider_instance,json=tlsCertificateProviderInstance,proto3" json:"tls_certificate_provider_instance,omitempty"` + // Custom TLS certificate selector. + // + // Select TLS certificate based on TLS client hello. + // If empty, defaults to native TLS certificate selection behavior: + // DNS SANs or Subject Common Name in TLS certificates is extracted as server name pattern to match SNI. + CustomTlsCertificateSelector *v3.TypedExtensionConfig `protobuf:"bytes,16,opt,name=custom_tls_certificate_selector,json=customTlsCertificateSelector,proto3" json:"custom_tls_certificate_selector,omitempty"` + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + TlsCertificateCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,9,opt,name=tls_certificate_certificate_provider,json=tlsCertificateCertificateProvider,proto3" json:"tls_certificate_certificate_provider,omitempty"` + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ TlsCertificateCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,11,opt,name=tls_certificate_certificate_provider_instance,json=tlsCertificateCertificateProviderInstance,proto3" json:"tls_certificate_certificate_provider_instance,omitempty"` + // Types that are assignable to ValidationContextType: + // + // *CommonTlsContext_ValidationContext + // *CommonTlsContext_ValidationContextSdsSecretConfig + // *CommonTlsContext_CombinedValidationContext + // *CommonTlsContext_ValidationContextCertificateProvider + // *CommonTlsContext_ValidationContextCertificateProviderInstance + ValidationContextType isCommonTlsContext_ValidationContextType `protobuf_oneof:"validation_context_type"` + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + AlpnProtocols []string `protobuf:"bytes,4,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"` + // Custom TLS handshaker. If empty, defaults to native TLS handshaking + // behavior. + CustomHandshaker *v3.TypedExtensionConfig `protobuf:"bytes,13,opt,name=custom_handshaker,json=customHandshaker,proto3" json:"custom_handshaker,omitempty"` + // TLS key log configuration + KeyLog *TlsKeyLog `protobuf:"bytes,15,opt,name=key_log,json=keyLog,proto3" json:"key_log,omitempty"` +} + +func (x *CommonTlsContext) Reset() { + *x = CommonTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext) ProtoMessage() {} + +func (x *CommonTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext.ProtoReflect.Descriptor instead. 
+func (*CommonTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3} +} + +func (x *CommonTlsContext) GetTlsParams() *TlsParameters { + if x != nil { + return x.TlsParams + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificates() []*TlsCertificate { + if x != nil { + return x.TlsCertificates + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificateSdsSecretConfigs() []*SdsSecretConfig { + if x != nil { + return x.TlsCertificateSdsSecretConfigs + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificateProviderInstance() *CertificateProviderPluginInstance { + if x != nil { + return x.TlsCertificateProviderInstance + } + return nil +} + +func (x *CommonTlsContext) GetCustomTlsCertificateSelector() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomTlsCertificateSelector + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetTlsCertificateCertificateProvider() *CommonTlsContext_CertificateProvider { + if x != nil { + return x.TlsCertificateCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetTlsCertificateCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x != nil { + return x.TlsCertificateCertificateProviderInstance + } + return nil +} + +func (m *CommonTlsContext) GetValidationContextType() isCommonTlsContext_ValidationContextType { + if m != nil { + return m.ValidationContextType + } + return nil +} + +func (x *CommonTlsContext) GetValidationContext() *CertificateValidationContext { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContext); ok { + return x.ValidationContext + } + return nil +} + +func (x *CommonTlsContext) GetValidationContextSdsSecretConfig() *SdsSecretConfig { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextSdsSecretConfig); ok { + return x.ValidationContextSdsSecretConfig + } + return nil +} + +func (x *CommonTlsContext) GetCombinedValidationContext() *CommonTlsContext_CombinedCertificateValidationContext { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_CombinedValidationContext); ok { + return x.CombinedValidationContext + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetValidationContextCertificateProvider() *CommonTlsContext_CertificateProvider { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextCertificateProvider); ok { + return x.ValidationContextCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+func (x *CommonTlsContext) GetValidationContextCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextCertificateProviderInstance); ok { + return x.ValidationContextCertificateProviderInstance + } + return nil +} + +func (x *CommonTlsContext) GetAlpnProtocols() []string { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +func (x *CommonTlsContext) GetCustomHandshaker() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomHandshaker + } + return nil +} + +func (x *CommonTlsContext) GetKeyLog() *TlsKeyLog { + if x != nil { + return x.KeyLog + } + return nil +} + +type isCommonTlsContext_ValidationContextType interface { + isCommonTlsContext_ValidationContextType() +} + +type CommonTlsContext_ValidationContext struct { + // How to validate peer certificates. + ValidationContext *CertificateValidationContext `protobuf:"bytes,3,opt,name=validation_context,json=validationContext,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextSdsSecretConfig struct { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + ValidationContextSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,7,opt,name=validation_context_sds_secret_config,json=validationContextSdsSecretConfig,proto3,oneof"` +} + +type CommonTlsContext_CombinedValidationContext struct { + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedValidationContext *CommonTlsContext_CombinedCertificateValidationContext `protobuf:"bytes,8,opt,name=combined_validation_context,json=combinedValidationContext,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextCertificateProvider struct { + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + ValidationContextCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,10,opt,name=validation_context_certificate_provider,json=validationContextCertificateProvider,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextCertificateProviderInstance struct { + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ ValidationContextCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,12,opt,name=validation_context_certificate_provider_instance,json=validationContextCertificateProviderInstance,proto3,oneof"` +} + +func (*CommonTlsContext_ValidationContext) isCommonTlsContext_ValidationContextType() {} + +func (*CommonTlsContext_ValidationContextSdsSecretConfig) isCommonTlsContext_ValidationContextType() { +} + +func (*CommonTlsContext_CombinedValidationContext) isCommonTlsContext_ValidationContextType() {} + +func (*CommonTlsContext_ValidationContextCertificateProvider) isCommonTlsContext_ValidationContextType() { +} + +func (*CommonTlsContext_ValidationContextCertificateProviderInstance) isCommonTlsContext_ValidationContextType() { +} + +// Config for Certificate provider to get certificates. This provider should allow certificates to be +// fetched/refreshed over the network asynchronously with respect to the TLS handshake. +// +// DEPRECATED: This message is not currently used, but if we ever do need it, we will want to +// move it out of CommonTlsContext and into common.proto, similar to the existing +// CertificateProviderPluginInstance message. +// +// [#not-implemented-hide:] +type CommonTlsContext_CertificateProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + // + // Types that are assignable to Config: + // + // *CommonTlsContext_CertificateProvider_TypedConfig + Config isCommonTlsContext_CertificateProvider_Config `protobuf_oneof:"config"` +} + +func (x *CommonTlsContext_CertificateProvider) Reset() { + *x = CommonTlsContext_CertificateProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CertificateProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CertificateProvider) ProtoMessage() {} + +func (x *CommonTlsContext_CertificateProvider) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CertificateProvider.ProtoReflect.Descriptor instead. 
+func (*CommonTlsContext_CertificateProvider) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *CommonTlsContext_CertificateProvider) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *CommonTlsContext_CertificateProvider) GetConfig() isCommonTlsContext_CertificateProvider_Config { + if m != nil { + return m.Config + } + return nil +} + +func (x *CommonTlsContext_CertificateProvider) GetTypedConfig() *v3.TypedExtensionConfig { + if x, ok := x.GetConfig().(*CommonTlsContext_CertificateProvider_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isCommonTlsContext_CertificateProvider_Config interface { + isCommonTlsContext_CertificateProvider_Config() +} + +type CommonTlsContext_CertificateProvider_TypedConfig struct { + TypedConfig *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*CommonTlsContext_CertificateProvider_TypedConfig) isCommonTlsContext_CertificateProvider_Config() { +} + +// Similar to CertificateProvider above, but allows the provider instances to be configured on +// the client side instead of being sent from the control plane. +// +// DEPRECATED: This message was moved outside of CommonTlsContext +// and now lives in common.proto. +// +// [#not-implemented-hide:] +type CommonTlsContext_CertificateProviderInstance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. 
+ CertificateName string `protobuf:"bytes,2,opt,name=certificate_name,json=certificateName,proto3" json:"certificate_name,omitempty"` +} + +func (x *CommonTlsContext_CertificateProviderInstance) Reset() { + *x = CommonTlsContext_CertificateProviderInstance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CertificateProviderInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CertificateProviderInstance) ProtoMessage() {} + +func (x *CommonTlsContext_CertificateProviderInstance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CertificateProviderInstance.ProtoReflect.Descriptor instead. +func (*CommonTlsContext_CertificateProviderInstance) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *CommonTlsContext_CertificateProviderInstance) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *CommonTlsContext_CertificateProviderInstance) GetCertificateName() string { + if x != nil { + return x.CertificateName + } + return "" +} + +type CommonTlsContext_CombinedCertificateValidationContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How to validate peer certificates. + DefaultValidationContext *CertificateValidationContext `protobuf:"bytes,1,opt,name=default_validation_context,json=defaultValidationContext,proto3" json:"default_validation_context,omitempty"` + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + ValidationContextSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,2,opt,name=validation_context_sds_secret_config,json=validationContextSdsSecretConfig,proto3" json:"validation_context_sds_secret_config,omitempty"` + // Certificate provider for fetching CA certs. This will populate the + // “default_validation_context.trusted_ca“ field. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + ValidationContextCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,3,opt,name=validation_context_certificate_provider,json=validationContextCertificateProvider,proto3" json:"validation_context_certificate_provider,omitempty"` + // Certificate provider instance for fetching CA certs. This will populate the + // “default_validation_context.trusted_ca“ field. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ ValidationContextCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,4,opt,name=validation_context_certificate_provider_instance,json=validationContextCertificateProviderInstance,proto3" json:"validation_context_certificate_provider_instance,omitempty"` +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) Reset() { + *x = CommonTlsContext_CombinedCertificateValidationContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CombinedCertificateValidationContext) ProtoMessage() {} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CombinedCertificateValidationContext.ProtoReflect.Descriptor instead. +func (*CommonTlsContext_CombinedCertificateValidationContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetDefaultValidationContext() *CertificateValidationContext { + if x != nil { + return x.DefaultValidationContext + } + return nil +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextSdsSecretConfig() *SdsSecretConfig { + if x != nil { + return x.ValidationContextSdsSecretConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextCertificateProvider() *CommonTlsContext_CertificateProvider { + if x != nil { + return x.ValidationContextCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x != nil { + return x.ValidationContextCertificateProviderInstance + } + return nil +} + +var File_envoy_extensions_transport_sockets_tls_v3_tls_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, + 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x03, 0x0a, 0x12, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x74, 0x6c, 
0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1a, 0x0a, 0x03, + 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, + 0x28, 0xff, 0x01, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x72, 0x65, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x6e, 0x65, + 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0x4d, 0x0a, 0x15, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x73, 0x61, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x52, 0x73, 0x61, 0x4b, 0x65, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, + 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0xce, 0x09, + 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, + 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x58, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 
0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x6e, 0x69, 0x12, 0x71, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x8d, 0x01, 0x0a, 0x25, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x64, 0x73, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x24, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6c, 0x65, 0x73, 0x73, + 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x21, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, + 0x0a, 0x23, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, + 0x75, 0x6c, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, + 0x0f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0xaa, 0x01, 0x0a, 0x1a, 0x06, 0x08, 0x80, 0x80, 0x80, 0x80, + 0x10, 0x32, 0x00, 0x52, 0x0e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x88, 0x01, 
0x0a, 0x12, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, + 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x77, + 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x6f, 0x63, + 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, + 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x6d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x65, 0x72, + 0x74, 0x73, 0x4f, 0x6e, 0x53, 0x6e, 0x69, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x22, 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, + 0x45, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, + 0x01, 0x0a, 0x09, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 
0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xdd, 0x18, + 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x52, 0x09, 0x74, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x86, 0x01, 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 
0x5f, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0xad, 0x01, 0x0a, 0x24, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x21, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xc6, 0x01, 0x0a, 0x2d, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x29, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 
0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8c, 0x01, 0x0a, 0x24, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, 0x0a, 0x1b, 0x63, 0x6f, + 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0xb5, + 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, + 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xce, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6c, + 0x6f, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x52, 0x06, + 0x6b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6d, 0x0a, 0x1b, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xa4, 0x06, 0x0a, 0x24, 0x43, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xb3, 0x01, 0x0a, + 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0xcc, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 
0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x3a, 0x4e, 0x9a, 0xc5, 0x88, 0x1e, 0x49, 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x19, 0x0a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0xa5, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x42, 0x08, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescOnce.Do(func() { + 
file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes = []interface{}{ + (DownstreamTlsContext_OcspStaplePolicy)(0), // 0: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy + (*UpstreamTlsContext)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + (*DownstreamTlsContext)(nil), // 2: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + (*TlsKeyLog)(nil), // 3: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + (*CommonTlsContext)(nil), // 4: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + (*CommonTlsContext_CertificateProvider)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + (*CommonTlsContext_CertificateProviderInstance)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + (*CommonTlsContext_CombinedCertificateValidationContext)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*TlsSessionTicketKeys)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*SdsSecretConfig)(nil), // 11: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration + (*v3.CidrRange)(nil), // 13: envoy.config.core.v3.CidrRange + (*TlsParameters)(nil), // 14: envoy.extensions.transport_sockets.tls.v3.TlsParameters + (*TlsCertificate)(nil), // 15: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*CertificateProviderPluginInstance)(nil), // 16: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + (*v3.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig + (*CertificateValidationContext)(nil), // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +} +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = []int32{ + 4, // 0: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.common_tls_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + 8, // 1: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.max_session_keys:type_name -> google.protobuf.UInt32Value + 9, // 2: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.enforce_rsa_key_usage:type_name -> google.protobuf.BoolValue + 4, // 3: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.common_tls_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + 9, // 4: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_client_certificate:type_name -> google.protobuf.BoolValue + 9, // 5: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_sni:type_name -> google.protobuf.BoolValue + 10, // 6: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys:type_name -> 
envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + 11, // 7: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 12, // 8: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_timeout:type_name -> google.protobuf.Duration + 0, // 9: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.ocsp_staple_policy:type_name -> envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy + 9, // 10: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.full_scan_certs_on_sni_mismatch:type_name -> google.protobuf.BoolValue + 13, // 11: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.local_address_range:type_name -> envoy.config.core.v3.CidrRange + 13, // 12: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.remote_address_range:type_name -> envoy.config.core.v3.CidrRange + 14, // 13: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_params:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters + 15, // 14: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificates:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate + 11, // 15: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_sds_secret_configs:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 16, // 16: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + 17, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_tls_certificate_selector:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 18, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 7, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + 5, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 17, // 25: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + 17, // 27: 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 28: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 29: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 5, // 30: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 31: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_tls_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownstreamTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsKeyLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CertificateProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CertificateProviderInstance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CombinedCertificateValidationContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*DownstreamTlsContext_SessionTicketKeys)(nil), + (*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig)(nil), + (*DownstreamTlsContext_DisableStatelessSessionResumption)(nil), + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*CommonTlsContext_ValidationContext)(nil), + (*CommonTlsContext_ValidationContextSdsSecretConfig)(nil), + (*CommonTlsContext_CombinedValidationContext)(nil), + (*CommonTlsContext_ValidationContextCertificateProvider)(nil), + (*CommonTlsContext_ValidationContextCertificateProviderInstance)(nil), + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*CommonTlsContext_CertificateProvider_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc, + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs, + EnumInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_tls_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go new file mode 100644 index 000000000..6468ff227 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go @@ -0,0 +1,1902 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *UpstreamTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamTlsContextMultiError, or nil if none found. +func (m *UpstreamTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetSni()) > 255 { + err := UpstreamTlsContextValidationError{ + field: "Sni", + reason: "value length must be at most 255 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AllowRenegotiation + + if all { + switch v := interface{}(m.GetMaxSessionKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxSessionKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnforceRsaKeyUsage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnforceRsaKeyUsage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpstreamTlsContextMultiError(errors) + } + + return nil +} + +// UpstreamTlsContextMultiError is an error wrapping multiple validation errors +// returned by 
UpstreamTlsContext.ValidateAll() if the designated constraints +// aren't met. +type UpstreamTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamTlsContextMultiError) AllErrors() []error { return m } + +// UpstreamTlsContextValidationError is the validation error returned by +// UpstreamTlsContext.Validate if the designated constraints aren't met. +type UpstreamTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamTlsContextValidationError) ErrorName() string { + return "UpstreamTlsContextValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamTlsContextValidationError{} + +// Validate checks the field values on DownstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DownstreamTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DownstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DownstreamTlsContextMultiError, or nil if none found. 
+func (m *DownstreamTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *DownstreamTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequireClientCertificate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireClientCertificate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequireSni()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireSni()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableStatefulSessionResumption + + if d := m.GetSessionTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = DownstreamTlsContextValidationError{ + field: "SessionTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(4294967296*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte || dur >= lt { + err := DownstreamTlsContextValidationError{ + field: "SessionTimeout", + reason: "value must be inside range [0s, 1193046h28m16s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if _, ok := 
DownstreamTlsContext_OcspStaplePolicy_name[int32(m.GetOcspStaplePolicy())]; !ok { + err := DownstreamTlsContextValidationError{ + field: "OcspStaplePolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFullScanCertsOnSniMismatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFullScanCertsOnSniMismatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PreferClientCiphers + + switch v := m.SessionTicketKeysType.(type) { + case *DownstreamTlsContext_SessionTicketKeys: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSessionTicketKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeysSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSessionTicketKeysSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + case *DownstreamTlsContext_DisableStatelessSessionResumption: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for DisableStatelessSessionResumption + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return DownstreamTlsContextMultiError(errors) + } + + return nil +} + +// DownstreamTlsContextMultiError is an error wrapping multiple validation +// errors returned by DownstreamTlsContext.ValidateAll() if the designated +// constraints aren't met. +type DownstreamTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DownstreamTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DownstreamTlsContextMultiError) AllErrors() []error { return m } + +// DownstreamTlsContextValidationError is the validation error returned by +// DownstreamTlsContext.Validate if the designated constraints aren't met. +type DownstreamTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DownstreamTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DownstreamTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DownstreamTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DownstreamTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DownstreamTlsContextValidationError) ErrorName() string { + return "DownstreamTlsContextValidationError" +} + +// Error satisfies the builtin error interface +func (e DownstreamTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDownstreamTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DownstreamTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DownstreamTlsContextValidationError{} + +// Validate checks the field values on TlsKeyLog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsKeyLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsKeyLog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsKeyLogMultiError, or nil +// if none found. 
+func (m *TlsKeyLog) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsKeyLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := TlsKeyLogValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetLocalAddressRange() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRemoteAddressRange() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsKeyLogMultiError(errors) + } + + return nil +} + +// TlsKeyLogMultiError is an error wrapping multiple validation errors returned +// by TlsKeyLog.ValidateAll() if the designated constraints aren't met. +type TlsKeyLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsKeyLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsKeyLogMultiError) AllErrors() []error { return m } + +// TlsKeyLogValidationError is the validation error returned by +// TlsKeyLog.Validate if the designated constraints aren't met. +type TlsKeyLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsKeyLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsKeyLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsKeyLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e TlsKeyLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TlsKeyLogValidationError) ErrorName() string { return "TlsKeyLogValidationError" } + +// Error satisfies the builtin error interface +func (e TlsKeyLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsKeyLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsKeyLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsKeyLogValidationError{} + +// Validate checks the field values on CommonTlsContext with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommonTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CommonTlsContextMultiError, or nil if none found. +func (m *CommonTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTlsParams()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsParams()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetTlsCertificates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetTlsCertificateSdsSecretConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: 
fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTlsCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCustomTlsCertificateSelector()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomTlsCertificateSelector()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsCertificateCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsCertificateCertificateProviderInstance()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCustomHandshaker()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomHandshaker()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeyLog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyLog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ValidationContextType.(type) { + case *CommonTlsContext_ValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded 
message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextSdsSecretConfig: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_CombinedValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCombinedValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCombinedValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextCertificateProvider: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextCertificateProviderInstance: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return CommonTlsContextMultiError(errors) + } + + return nil +} + +// CommonTlsContextMultiError is an error wrapping multiple validation errors +// returned by CommonTlsContext.ValidateAll() if the designated constraints +// aren't met. +type CommonTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContextMultiError) AllErrors() []error { return m } + +// CommonTlsContextValidationError is the validation error returned by +// CommonTlsContext.Validate if the designated constraints aren't met. +type CommonTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CommonTlsContextValidationError) ErrorName() string { return "CommonTlsContextValidationError" } + +// Error satisfies the builtin error interface +func (e CommonTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContextValidationError{} + +// Validate checks the field values on CommonTlsContext_CertificateProvider +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *CommonTlsContext_CertificateProvider) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommonTlsContext_CertificateProvider +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// CommonTlsContext_CertificateProviderMultiError, or nil if none found. +func (m *CommonTlsContext_CertificateProvider) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CertificateProvider) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofConfigPresent := false + switch v := m.Config.(type) { + case *CommonTlsContext_CertificateProvider_TypedConfig: + if v == nil { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Config", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigPresent = true + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigPresent { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Config", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CommonTlsContext_CertificateProviderMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CertificateProviderMultiError is an error wrapping multiple +// validation errors returned by +// 
CommonTlsContext_CertificateProvider.ValidateAll() if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CertificateProviderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CertificateProviderMultiError) AllErrors() []error { return m } + +// CommonTlsContext_CertificateProviderValidationError is the validation error +// returned by CommonTlsContext_CertificateProvider.Validate if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CertificateProviderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContext_CertificateProviderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContext_CertificateProviderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContext_CertificateProviderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommonTlsContext_CertificateProviderValidationError) ErrorName() string { + return "CommonTlsContext_CertificateProviderValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CertificateProviderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CertificateProvider.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CertificateProviderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CertificateProviderValidationError{} + +// Validate checks the field values on +// CommonTlsContext_CertificateProviderInstance with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext_CertificateProviderInstance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CommonTlsContext_CertificateProviderInstance with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CommonTlsContext_CertificateProviderInstanceMultiError, or nil if none found. 
+func (m *CommonTlsContext_CertificateProviderInstance) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CertificateProviderInstance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for InstanceName + + // no validation rules for CertificateName + + if len(errors) > 0 { + return CommonTlsContext_CertificateProviderInstanceMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CertificateProviderInstanceMultiError is an error wrapping +// multiple validation errors returned by +// CommonTlsContext_CertificateProviderInstance.ValidateAll() if the +// designated constraints aren't met. +type CommonTlsContext_CertificateProviderInstanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CertificateProviderInstanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CertificateProviderInstanceMultiError) AllErrors() []error { return m } + +// CommonTlsContext_CertificateProviderInstanceValidationError is the +// validation error returned by +// CommonTlsContext_CertificateProviderInstance.Validate if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderInstanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) ErrorName() string { + return "CommonTlsContext_CertificateProviderInstanceValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CertificateProviderInstance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CertificateProviderInstanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CertificateProviderInstanceValidationError{} + +// Validate checks the field values on +// CommonTlsContext_CombinedCertificateValidationContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext_CombinedCertificateValidationContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CommonTlsContext_CombinedCertificateValidationContext with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// CommonTlsContext_CombinedCertificateValidationContextMultiError, or nil if +// none found. +func (m *CommonTlsContext_CombinedCertificateValidationContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValidationContext() == nil { + err := CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetValidationContextSdsSecretConfig() == nil { + err := CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CommonTlsContext_CombinedCertificateValidationContextMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CombinedCertificateValidationContextMultiError is an error +// wrapping multiple validation errors returned by +// CommonTlsContext_CombinedCertificateValidationContext.ValidateAll() if the +// designated constraints aren't met. +type CommonTlsContext_CombinedCertificateValidationContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CombinedCertificateValidationContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CombinedCertificateValidationContextMultiError) AllErrors() []error { + return m +} + +// CommonTlsContext_CombinedCertificateValidationContextValidationError is the +// validation error returned by +// CommonTlsContext_CombinedCertificateValidationContext.Validate if the +// designated constraints aren't met. +type CommonTlsContext_CombinedCertificateValidationContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. 
+func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) ErrorName() string { + return "CommonTlsContext_CombinedCertificateValidationContextValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CombinedCertificateValidationContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CombinedCertificateValidationContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CombinedCertificateValidationContextValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go new file mode 100644 index 000000000..7e9ee8967 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go @@ -0,0 +1,286 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration specific to the `SPIFFE `_ certificate validator. +// +// Example: +// +// .. validated-code-block:: yaml +// +// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +// +// custom_validator_config: +// name: envoy.tls.cert_validator.spiffe +// typed_config: +// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig +// trust_domains: +// - name: foo.com +// trust_bundle: +// filename: "foo.pem" +// - name: envoy.com +// trust_bundle: +// filename: "envoy.pem" +// +// In this example, a presented peer certificate whose SAN matches “spiffe://foo.com/**“ is validated against +// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint +// a SVID belonging to another trust domain. That means, in this example, a SVID signed by “envoy.com“'s CA with “spiffe://foo.com/**“ +// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. +// +// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. 
+// +// - :ref:`allow_expired_certificate ` to allow expired certificates. +// - :ref:`match_typed_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. +type SPIFFECertValidatorConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field specifies trust domains used for validating incoming X.509-SVID(s). + TrustDomains []*SPIFFECertValidatorConfig_TrustDomain `protobuf:"bytes,1,rep,name=trust_domains,json=trustDomains,proto3" json:"trust_domains,omitempty"` +} + +func (x *SPIFFECertValidatorConfig) Reset() { + *x = SPIFFECertValidatorConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFECertValidatorConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFECertValidatorConfig) ProtoMessage() {} + +func (x *SPIFFECertValidatorConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFECertValidatorConfig.ProtoReflect.Descriptor instead. +func (*SPIFFECertValidatorConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP(), []int{0} +} + +func (x *SPIFFECertValidatorConfig) GetTrustDomains() []*SPIFFECertValidatorConfig_TrustDomain { + if x != nil { + return x.TrustDomains + } + return nil +} + +type SPIFFECertValidatorConfig_TrustDomain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the trust domain, “example.com“, “foo.bar.gov“ for example. + // Note that this must *not* have "spiffe://" prefix. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. 
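The trust-domain message above maps one-to-one onto the YAML example in the preceding comment. As an illustrative aside (not part of the vendored file), a minimal sketch of building the same two-trust-domain configuration in Go and running the generated validation; the corev3 alias and the DataSource_Filename specifier are assumptions about the usual go-control-plane core/v3 API rather than something shown in this diff:

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
)

func main() {
	// Equivalent of the YAML example above: two isolated trust domains,
	// each with its own x.509 trust bundle loaded from a file.
	cfg := &tlsv3.SPIFFECertValidatorConfig{
		TrustDomains: []*tlsv3.SPIFFECertValidatorConfig_TrustDomain{
			{
				Name:        "foo.com", // no "spiffe://" prefix, per the field comment
				TrustBundle: &corev3.DataSource{Specifier: &corev3.DataSource_Filename{Filename: "foo.pem"}},
			},
			{
				Name:        "envoy.com",
				TrustBundle: &corev3.DataSource{Specifier: &corev3.DataSource_Filename{Filename: "envoy.pem"}},
			},
		},
	}

	// ValidateAll enforces the generated constraints: at least one trust
	// domain, and a non-empty name for each domain.
	if err := cfg.ValidateAll(); err != nil {
		fmt.Println("invalid SPIFFE validator config:", err)
	}
}

Because the trust bundles are keyed by trust-domain name, the configuration sketched here keeps each bundle isolated, matching the selection behaviour described in the comment.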
+ TrustBundle *v3.DataSource `protobuf:"bytes,2,opt,name=trust_bundle,json=trustBundle,proto3" json:"trust_bundle,omitempty"` +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) Reset() { + *x = SPIFFECertValidatorConfig_TrustDomain{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFECertValidatorConfig_TrustDomain) ProtoMessage() {} + +func (x *SPIFFECertValidatorConfig_TrustDomain) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFECertValidatorConfig_TrustDomain.ProtoReflect.Descriptor instead. +func (*SPIFFECertValidatorConfig_TrustDomain) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) GetTrustBundle() *v3.DataSource { + if x != nil { + return x.TrustBundle + } + return nil +} + +var File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x19, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x43, 0x65, 0x72, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x7f, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 
0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x43, 0x65, 0x72, 0x74, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x1a, 0x6f, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0c, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x42, 0xba, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x54, 0x6c, 0x73, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescOnce.Do(func() { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes = []interface{}{ + (*SPIFFECertValidatorConfig)(nil), // 
0: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig + (*SPIFFECertValidatorConfig_TrustDomain)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain + (*v3.DataSource)(nil), // 2: envoy.config.core.v3.DataSource +} +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.trust_domains:type_name -> envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain + 2, // 1: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain.trust_bundle:type_name -> envoy.config.core.v3.DataSource + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFECertValidatorConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFECertValidatorConfig_TrustDomain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go new file mode 100644 index 000000000..4992dec75 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go @@ -0,0 +1,329 @@ +//go:build !disable_pgv +// Code generated 
by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SPIFFECertValidatorConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SPIFFECertValidatorConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SPIFFECertValidatorConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SPIFFECertValidatorConfigMultiError, or nil if none found. +func (m *SPIFFECertValidatorConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SPIFFECertValidatorConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetTrustDomains()) < 1 { + err := SPIFFECertValidatorConfigValidationError{ + field: "TrustDomains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTrustDomains() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SPIFFECertValidatorConfigMultiError(errors) + } + + return nil +} + +// SPIFFECertValidatorConfigMultiError is an error wrapping multiple validation +// errors returned by SPIFFECertValidatorConfig.ValidateAll() if the +// designated constraints aren't met. +type SPIFFECertValidatorConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SPIFFECertValidatorConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SPIFFECertValidatorConfigMultiError) AllErrors() []error { return m } + +// SPIFFECertValidatorConfigValidationError is the validation error returned by +// SPIFFECertValidatorConfig.Validate if the designated constraints aren't met. 
+type SPIFFECertValidatorConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SPIFFECertValidatorConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SPIFFECertValidatorConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SPIFFECertValidatorConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SPIFFECertValidatorConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SPIFFECertValidatorConfigValidationError) ErrorName() string { + return "SPIFFECertValidatorConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e SPIFFECertValidatorConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSPIFFECertValidatorConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SPIFFECertValidatorConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SPIFFECertValidatorConfigValidationError{} + +// Validate checks the field values on SPIFFECertValidatorConfig_TrustDomain +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *SPIFFECertValidatorConfig_TrustDomain) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SPIFFECertValidatorConfig_TrustDomain +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// SPIFFECertValidatorConfig_TrustDomainMultiError, or nil if none found. 
+func (m *SPIFFECertValidatorConfig_TrustDomain) ValidateAll() error { + return m.validate(true) +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTrustBundle()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrustBundle()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SPIFFECertValidatorConfig_TrustDomainMultiError(errors) + } + + return nil +} + +// SPIFFECertValidatorConfig_TrustDomainMultiError is an error wrapping +// multiple validation errors returned by +// SPIFFECertValidatorConfig_TrustDomain.ValidateAll() if the designated +// constraints aren't met. +type SPIFFECertValidatorConfig_TrustDomainMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SPIFFECertValidatorConfig_TrustDomainMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SPIFFECertValidatorConfig_TrustDomainMultiError) AllErrors() []error { return m } + +// SPIFFECertValidatorConfig_TrustDomainValidationError is the validation error +// returned by SPIFFECertValidatorConfig_TrustDomain.Validate if the +// designated constraints aren't met. +type SPIFFECertValidatorConfig_TrustDomainValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
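The Validate/ValidateAll pair generated for TrustDomain follows the same protoc-gen-validate contract as the other messages in this diff: Validate returns the first violation, while ValidateAll collects every violation in the generated MultiError slice. A hedged sketch (illustration only, not vendored code) of inspecting both forms for a trust domain whose empty name violates the min-length rule:

package main

import (
	"fmt"

	tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
)

func main() {
	td := &tlsv3.SPIFFECertValidatorConfig_TrustDomain{} // empty Name violates "value length must be at least 1 runes"

	// Validate returns only the first violation encountered.
	if err := td.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll wraps all violations in the generated MultiError slice,
	// which can be unwrapped via AllErrors().
	if err := td.ValidateAll(); err != nil {
		if multi, ok := err.(tlsv3.SPIFFECertValidatorConfig_TrustDomainMultiError); ok {
			for _, violation := range multi.AllErrors() {
				fmt.Println("violation:", violation)
			}
		}
	}
}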
+func (e SPIFFECertValidatorConfig_TrustDomainValidationError) ErrorName() string { + return "SPIFFECertValidatorConfig_TrustDomainValidationError" +} + +// Error satisfies the builtin error interface +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSPIFFECertValidatorConfig_TrustDomain.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SPIFFECertValidatorConfig_TrustDomainValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SPIFFECertValidatorConfig_TrustDomainValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go new file mode 100644 index 000000000..6cad4f635 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrustBundle != nil { + if vtmsg, ok := interface{}(m.TrustBundle).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustBundle) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TrustDomains) > 0 { + for iNdEx := len(m.TrustDomains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TrustDomains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrustBundle != nil { + if size, ok := interface{}(m.TrustBundle).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TrustBundle) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SPIFFECertValidatorConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TrustDomains) > 0 { + for _, e := range m.TrustDomains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go new file mode 100644 index 000000000..287129049 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go @@ -0,0 +1,1265 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnforceRsaKeyUsage != nil { + size, err := (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxSessionKeys != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxSessionKeys).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.AllowRenegotiation { + i-- + if m.AllowRenegotiation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Sni) > 0 { + i -= len(m.Sni) + copy(dAtA[i:], m.Sni) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sni))) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if m.PreferClientCiphers { + i-- + if m.PreferClientCiphers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DisableStatefulSessionResumption { + i-- + if m.DisableStatefulSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.FullScanCertsOnSniMismatch != nil { + size, err := (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OcspStaplePolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.OcspStaplePolicy)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_DisableStatelessSessionResumption); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SessionTimeout != nil { + size, err := (*durationpb.Duration)(m.SessionTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequireSni != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSni).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RequireClientCertificate != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireClientCertificate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeysSdsSecretConfig != nil { + size, err := m.SessionTicketKeysSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.DisableStatelessSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *TlsKeyLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsKeyLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsKeyLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemoteAddressRange) > 0 { + for iNdEx := len(m.RemoteAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RemoteAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LocalAddressRange) > 0 { + for iNdEx := len(m.LocalAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.LocalAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Config.(*CommonTlsContext_CertificateProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + if vtmsg, ok := interface{}(m.TypedConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CertificateProviderInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ValidationContextCertificateProviderInstance != nil { + size, err := 
m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValidationContext != nil { + size, err := m.DefaultValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CustomTlsCertificateSelector != nil { + if vtmsg, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTlsCertificateSelector) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.KeyLog != nil { + size, err := m.KeyLog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.TlsCertificateProviderInstance != nil { + size, err := m.TlsCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.CustomHandshaker != nil { + if vtmsg, ok := interface{}(m.CustomHandshaker).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomHandshaker) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProviderInstance); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProviderInstance != nil { + size, err := m.TlsCertificateCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProvider); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProvider != nil { + size, err := m.TlsCertificateCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_CombinedValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for iNdEx := len(m.TlsCertificateSdsSecretConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificateSdsSecretConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.AlpnProtocols) > 0 { + for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificates) > 0 { + for iNdEx := len(m.TlsCertificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.TlsParams != nil { + size, err := m.TlsParams.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CombinedValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CombinedValidationContext != nil { + size, err := m.CombinedValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProviderInstance != nil { + size, err := m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *UpstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sni) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowRenegotiation { + n += 2 + } + if m.MaxSessionKeys != nil { + l = (*wrapperspb.UInt32Value)(m.MaxSessionKeys).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforceRsaKeyUsage != nil { + l = (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 
+ } + if m.RequireClientCertificate != nil { + l = (*wrapperspb.BoolValue)(m.RequireClientCertificate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequireSni != nil { + l = (*wrapperspb.BoolValue)(m.RequireSni).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.SessionTicketKeysType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.SessionTimeout != nil { + l = (*durationpb.Duration)(m.SessionTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaplePolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.OcspStaplePolicy)) + } + if m.FullScanCertsOnSniMismatch != nil { + l = (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableStatefulSessionResumption { + n += 2 + } + if m.PreferClientCiphers { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeysSdsSecretConfig != nil { + l = m.SessionTicketKeysSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *TlsKeyLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LocalAddressRange) > 0 { + for _, e := range m.LocalAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RemoteAddressRange) > 0 { + for _, e := range m.RemoteAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Config.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + if size, ok := interface{}(m.TypedConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + 
return n +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValidationContext != nil { + l = m.DefaultValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsParams != nil { + l = m.TlsParams.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TlsCertificates) > 0 { + for _, e := range m.TlsCertificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.ValidationContextType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for _, e := range m.TlsCertificateSdsSecretConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TlsCertificateCertificateProvider != nil { + l = m.TlsCertificateCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateCertificateProviderInstance != nil { + l = m.TlsCertificateCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomHandshaker != nil { + if size, ok := interface{}(m.CustomHandshaker).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomHandshaker) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateProviderInstance != nil { + l = m.TlsCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyLog != nil { + l = m.KeyLog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomTlsCertificateSelector != nil { + if size, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomTlsCertificateSelector) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CombinedValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CombinedValidationContext != nil { + l = 
m.CombinedValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go new file mode 100644 index 000000000..6f09930e9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +type AdsDummy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AdsDummy) Reset() { + *x = AdsDummy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdsDummy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdsDummy) ProtoMessage() {} + +func (x *AdsDummy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdsDummy.ProtoReflect.Descriptor instead. 
+func (*AdsDummy) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_ads_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_service_discovery_v3_ads_proto protoreflect.FileDescriptor + +var file_envoy_service_discovery_v3_ads_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x36, 0x0a, 0x08, 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x32, 0xa6, 0x02, 0x0a, 0x1a, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 
0x06, 0x02, 0x10, 0x02, 0x0a, 0x28, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x41, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_discovery_v3_ads_proto_rawDescOnce sync.Once + file_envoy_service_discovery_v3_ads_proto_rawDescData = file_envoy_service_discovery_v3_ads_proto_rawDesc +) + +func file_envoy_service_discovery_v3_ads_proto_rawDescGZIP() []byte { + file_envoy_service_discovery_v3_ads_proto_rawDescOnce.Do(func() { + file_envoy_service_discovery_v3_ads_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_ads_proto_rawDescData) + }) + return file_envoy_service_discovery_v3_ads_proto_rawDescData +} + +var file_envoy_service_discovery_v3_ads_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_service_discovery_v3_ads_proto_goTypes = []interface{}{ + (*AdsDummy)(nil), // 0: envoy.service.discovery.v3.AdsDummy + (*DiscoveryRequest)(nil), // 1: envoy.service.discovery.v3.DiscoveryRequest + (*DeltaDiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DeltaDiscoveryRequest + (*DiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DiscoveryResponse + (*DeltaDiscoveryResponse)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryResponse +} +var file_envoy_service_discovery_v3_ads_proto_depIdxs = []int32{ + 1, // 0: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 2, // 1: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:input_type -> envoy.service.discovery.v3.DeltaDiscoveryRequest + 3, // 2: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 4, // 3: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:output_type -> envoy.service.discovery.v3.DeltaDiscoveryResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_service_discovery_v3_ads_proto_init() } +func file_envoy_service_discovery_v3_ads_proto_init() { + if File_envoy_service_discovery_v3_ads_proto != nil { + return + } + file_envoy_service_discovery_v3_discovery_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_service_discovery_v3_ads_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdsDummy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_discovery_v3_ads_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_discovery_v3_ads_proto_goTypes, + DependencyIndexes: file_envoy_service_discovery_v3_ads_proto_depIdxs, + MessageInfos: file_envoy_service_discovery_v3_ads_proto_msgTypes, + }.Build() + File_envoy_service_discovery_v3_ads_proto = out.File + file_envoy_service_discovery_v3_ads_proto_rawDesc = nil + file_envoy_service_discovery_v3_ads_proto_goTypes = nil + file_envoy_service_discovery_v3_ads_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go new file mode 100644 index 000000000..9080fedd8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go @@ -0,0 +1,136 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on AdsDummy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AdsDummy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AdsDummy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AdsDummyMultiError, or nil +// if none found. +func (m *AdsDummy) ValidateAll() error { + return m.validate(true) +} + +func (m *AdsDummy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return AdsDummyMultiError(errors) + } + + return nil +} + +// AdsDummyMultiError is an error wrapping multiple validation errors returned +// by AdsDummy.ValidateAll() if the designated constraints aren't met. +type AdsDummyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AdsDummyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AdsDummyMultiError) AllErrors() []error { return m } + +// AdsDummyValidationError is the validation error returned by +// AdsDummy.Validate if the designated constraints aren't met. +type AdsDummyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdsDummyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdsDummyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e AdsDummyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdsDummyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdsDummyValidationError) ErrorName() string { return "AdsDummyValidationError" } + +// Error satisfies the builtin error interface +func (e AdsDummyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdsDummy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdsDummyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdsDummyValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go new file mode 100644 index 000000000..7a7f1af97 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources" + AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources" +) + +// AggregatedDiscoveryServiceClient is the client API for AggregatedDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type AggregatedDiscoveryServiceClient interface { + // This is a gRPC-only API. + StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) + DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) +} + +type aggregatedDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAggregatedDiscoveryServiceClient(cc grpc.ClientConnInterface) AggregatedDiscoveryServiceClient { + return &aggregatedDiscoveryServiceClient{cc} +} + +func (c *aggregatedDiscoveryServiceClient) StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[0], AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceStreamAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesClient interface { + Send(*DiscoveryRequest) error + Recv() (*DiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Send(m *DiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Recv() (*DiscoveryResponse, error) { + m := new(DiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aggregatedDiscoveryServiceClient) DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[1], AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceDeltaAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesClient interface { + Send(*DeltaDiscoveryRequest) error + Recv() (*DeltaDiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Send(m *DeltaDiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Recv() (*DeltaDiscoveryResponse, error) { + m := new(DeltaDiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryServiceServer is the server API for AggregatedDiscoveryService service. +// All implementations should embed UnimplementedAggregatedDiscoveryServiceServer +// for forward compatibility +type AggregatedDiscoveryServiceServer interface { + // This is a gRPC-only API. + StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error + DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error +} + +// UnimplementedAggregatedDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedAggregatedDiscoveryServiceServer struct { +} + +func (UnimplementedAggregatedDiscoveryServiceServer) StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamAggregatedResources not implemented") +} +func (UnimplementedAggregatedDiscoveryServiceServer) DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method DeltaAggregatedResources not implemented") +} + +// UnsafeAggregatedDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AggregatedDiscoveryServiceServer will +// result in compilation errors. 
+type UnsafeAggregatedDiscoveryServiceServer interface { + mustEmbedUnimplementedAggregatedDiscoveryServiceServer() +} + +func RegisterAggregatedDiscoveryServiceServer(s grpc.ServiceRegistrar, srv AggregatedDiscoveryServiceServer) { + s.RegisterService(&AggregatedDiscoveryService_ServiceDesc, srv) +} + +func _AggregatedDiscoveryService_StreamAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).StreamAggregatedResources(&aggregatedDiscoveryServiceStreamAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesServer interface { + Send(*DiscoveryResponse) error + Recv() (*DiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Send(m *DiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Recv() (*DiscoveryRequest, error) { + m := new(DiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _AggregatedDiscoveryService_DeltaAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).DeltaAggregatedResources(&aggregatedDiscoveryServiceDeltaAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesServer interface { + Send(*DeltaDiscoveryResponse) error + Recv() (*DeltaDiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Send(m *DeltaDiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Recv() (*DeltaDiscoveryRequest, error) { + m := new(DeltaDiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryService_ServiceDesc is the grpc.ServiceDesc for AggregatedDiscoveryService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AggregatedDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.discovery.v3.AggregatedDiscoveryService", + HandlerType: (*AggregatedDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamAggregatedResources", + Handler: _AggregatedDiscoveryService_StreamAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "DeltaAggregatedResources", + Handler: _AggregatedDiscoveryService_DeltaAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/discovery/v3/ads.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go new file mode 100644 index 000000000..0e604235d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AdsDummy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdsDummy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AdsDummy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdsDummy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go new file mode 100644 index 000000000..a9b5f6935 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go @@ -0,0 +1,1693 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies a resource to be subscribed to. +type ResourceLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name to subscribe to. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of dynamic parameters used to match against the dynamic parameter + // constraints on the resource. This allows clients to select between + // multiple variants of the same resource. 
+ DynamicParameters map[string]string `protobuf:"bytes,2,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ResourceLocator) Reset() { + *x = ResourceLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator) ProtoMessage() {} + +func (x *ResourceLocator) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead. +func (*ResourceLocator) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceLocator) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceLocator) GetDynamicParameters() map[string]string { + if x != nil { + return x.DynamicParameters + } + return nil +} + +// Specifies a concrete resource name. +type ResourceName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Dynamic parameter constraints associated with this resource. To be used by client-side caches + // (including xDS proxies) when matching subscribed resource locators. + DynamicParameterConstraints *DynamicParameterConstraints `protobuf:"bytes,2,opt,name=dynamic_parameter_constraints,json=dynamicParameterConstraints,proto3" json:"dynamic_parameter_constraints,omitempty"` +} + +func (x *ResourceName) Reset() { + *x = ResourceName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceName) ProtoMessage() {} + +func (x *ResourceName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead. +func (*ResourceName) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceName) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceName) GetDynamicParameterConstraints() *DynamicParameterConstraints { + if x != nil { + return x.DynamicParameterConstraints + } + return nil +} + +// A DiscoveryRequest requests a set of versioned resources of the same type for +// a given Envoy node on some API. 
+// [#next-free-field: 8] +type DiscoveryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version_info provided in the request messages will be the version_info + // received with the most recent successfully processed response or empty on + // the first request. It is expected that no new request is sent after a + // response is received until the Envoy instance is ready to ACK/NACK the new + // configuration. ACK/NACK takes place by returning the new API config version + // as applied or the previous API config version respectively. Each type_url + // (see below) has an independent version associated with it. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The node making the request. + Node *v3.Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + // List of resources to subscribe to, e.g. list of cluster names or a route + // configuration name. If this is empty, all resources for the API are + // returned. LDS/CDS may have empty resource_names, which will cause all + // resources for the Envoy instance to be returned. The LDS and CDS responses + // will then imply a number of resources that need to be fetched via EDS/RDS, + // which will be explicitly enumerated in resource_names. + ResourceNames []string `protobuf:"bytes,3,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names“ field that allows specifying dynamic + // parameters along with each resource name. Clients that populate this + // field must be able to handle responses from the server where resources + // are wrapped in a Resource message. + // Note that it is legal for a request to have some resources listed + // in “resource_names“ and others in “resource_locators“. + ResourceLocators []*ResourceLocator `protobuf:"bytes,7,rep,name=resource_locators,json=resourceLocators,proto3" json:"resource_locators,omitempty"` + // Type of the resource that is being requested, e.g. + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit + // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is + // required for ADS. + TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above + // discussion on version_info and the DiscoveryResponse nonce comment. This + // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, + // or 2) the client has not yet accepted an update in this xDS stream (unlike + // delta, where it is populated only for new explicit ACKs). + ResponseNonce string `protobuf:"bytes,5,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The “message“ field in “error_details“ provides the Envoy + // internal exception related to the failure. It is only intended for consumption during manual + // debugging, the string provided is not guaranteed to be stable across Envoy versions. 
+ ErrorDetail *status.Status `protobuf:"bytes,6,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` +} + +func (x *DiscoveryRequest) Reset() { + *x = DiscoveryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiscoveryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoveryRequest) ProtoMessage() {} + +func (x *DiscoveryRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoveryRequest.ProtoReflect.Descriptor instead. +func (*DiscoveryRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{2} +} + +func (x *DiscoveryRequest) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *DiscoveryRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *DiscoveryRequest) GetResourceNames() []string { + if x != nil { + return x.ResourceNames + } + return nil +} + +func (x *DiscoveryRequest) GetResourceLocators() []*ResourceLocator { + if x != nil { + return x.ResourceLocators + } + return nil +} + +func (x *DiscoveryRequest) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DiscoveryRequest) GetResponseNonce() string { + if x != nil { + return x.ResponseNonce + } + return "" +} + +func (x *DiscoveryRequest) GetErrorDetail() *status.Status { + if x != nil { + return x.ErrorDetail + } + return nil +} + +// [#next-free-field: 7] +type DiscoveryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the response data. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The response resources. These resources are typed and depend on the API being called. + Resources []*anypb.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + // [#not-implemented-hide:] + // Canary is used to support two Envoy command line flags: + // + // - --terminate-on-canary-transition-failure. When set, Envoy is able to + // terminate if it detects that configuration is stuck at canary. Consider + // this example sequence of updates: + // - Management server applies a canary config successfully. + // - Management server rolls back to a production config. + // - Envoy rejects the new production config. + // Since there is no sensible way to continue receiving configuration + // updates, Envoy will then terminate and apply production config from a + // clean slate. + // - --dry-run-canary. When set, a canary response will never be applied, only + // validated via a dry run. + Canary bool `protobuf:"varint,3,opt,name=canary,proto3" json:"canary,omitempty"` + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). 
+ TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // For gRPC based subscriptions, the nonce provides a way to explicitly ack a + // specific DiscoveryResponse in a following DiscoveryRequest. Additional + // messages may have been sent by Envoy to the management server for the + // previous version on the stream prior to this DiscoveryResponse, that were + // unprocessed at response send time. The nonce allows the management server + // to ignore any further DiscoveryRequests for the previous version until a + // DiscoveryRequest bearing the nonce. The nonce is optional and is not + // required for non-stream based xDS implementations. + Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"` + // The control plane instance that sent the response. + ControlPlane *v3.ControlPlane `protobuf:"bytes,6,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"` +} + +func (x *DiscoveryResponse) Reset() { + *x = DiscoveryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiscoveryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoveryResponse) ProtoMessage() {} + +func (x *DiscoveryResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoveryResponse.ProtoReflect.Descriptor instead. +func (*DiscoveryResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{3} +} + +func (x *DiscoveryResponse) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *DiscoveryResponse) GetResources() []*anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *DiscoveryResponse) GetCanary() bool { + if x != nil { + return x.Canary + } + return false +} + +func (x *DiscoveryResponse) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DiscoveryResponse) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *DiscoveryResponse) GetControlPlane() *v3.ControlPlane { + if x != nil { + return x.ControlPlane + } + return nil +} + +// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +// endpoint for Delta xDS. +// +// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a +// diff to the state of a xDS client. +// In Delta XDS there are per-resource versions, which allow tracking state at +// the resource granularity. +// An xDS Delta session is always in the context of a gRPC bidirectional +// stream. This allows the xDS server to keep track of the state of xDS clients +// connected to it. +// +// In Delta xDS the nonce field is required and used to pair +// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. +// Optionally, a response message level system_version_info is present for +// debugging purposes only. +// +// DeltaDiscoveryRequest plays two independent roles. 
Any DeltaDiscoveryRequest +// can be either or both of: [1] informing the server of what resources the +// client has gained/lost interest in (using resource_names_subscribe and +// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from +// the server (using response_nonce, with presence of error_detail making it a NACK). +// Additionally, the first message (for a given type_url) of a reconnected gRPC stream +// has a third role: informing the server of the resources (and their versions) +// that the client already possesses, using the initial_resource_versions field. +// +// As with state-of-the-world, when multiple resource types are multiplexed (ADS), +// all requests/acknowledgments/updates are logically walled off by type_url: +// a Cluster ACK exists in a completely separate world from a prior Route NACK. +// In particular, initial_resource_versions being sent at the "start" of every +// gRPC stream actually entails a message for each type_url, each with its own +// initial_resource_versions. +// [#next-free-field: 10] +type DeltaDiscoveryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The node making the request. + Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Type of the resource that is being requested, e.g. + // “type.googleapis.com/envoy.api.v2.ClusterLoadAssignment“. This does not need to be set if + // resources are only referenced via “xds_resource_subscribe“ and + // “xds_resources_unsubscribe“. + TypeUrl string `protobuf:"bytes,2,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // DeltaDiscoveryRequests allow the client to add or remove individual + // resources to the set of tracked resources in the context of a stream. + // All resource names in the resource_names_subscribe list are added to the + // set of tracked resources and all resource names in the resource_names_unsubscribe + // list are removed from the set of tracked resources. + // + // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or + // resource_names_unsubscribe list simply means that no resources are to be + // added or removed to the resource list. + // *Like* state-of-the-world xDS, the server must send updates for all tracked + // resources, but can also send updates for resources the client has not subscribed to. + // + // NOTE: the server must respond with all resources listed in resource_names_subscribe, + // even if it believes the client has the most recent version of them. The reason: + // the client may have dropped them, but then regained interest before it had a chance + // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. + // + // These two fields can be set in any DeltaDiscoveryRequest, including ACKs + // and initial_resource_versions. + // + // A list of Resource names to add to the list of tracked resources. + ResourceNamesSubscribe []string `protobuf:"bytes,3,rep,name=resource_names_subscribe,json=resourceNamesSubscribe,proto3" json:"resource_names_subscribe,omitempty"` + // A list of Resource names to remove from the list of tracked resources. + ResourceNamesUnsubscribe []string `protobuf:"bytes,4,rep,name=resource_names_unsubscribe,json=resourceNamesUnsubscribe,proto3" json:"resource_names_unsubscribe,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names_subscribe“ field that allows specifying dynamic parameters + // along with each resource name. 
+ // Note that it is legal for a request to have some resources listed + // in “resource_names_subscribe“ and others in “resource_locators_subscribe“. + ResourceLocatorsSubscribe []*ResourceLocator `protobuf:"bytes,8,rep,name=resource_locators_subscribe,json=resourceLocatorsSubscribe,proto3" json:"resource_locators_subscribe,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names_unsubscribe“ field that allows specifying dynamic parameters + // along with each resource name. + // Note that it is legal for a request to have some resources listed + // in “resource_names_unsubscribe“ and others in “resource_locators_unsubscribe“. + ResourceLocatorsUnsubscribe []*ResourceLocator `protobuf:"bytes,9,rep,name=resource_locators_unsubscribe,json=resourceLocatorsUnsubscribe,proto3" json:"resource_locators_unsubscribe,omitempty"` + // Informs the server of the versions of the resources the xDS client knows of, to enable the + // client to continue the same logical xDS session even in the face of gRPC stream reconnection. + // It will not be populated: [1] in the very first stream of a session, since the client will + // not yet have any resources, [2] in any message after the first in a stream (for a given + // type_url), since the server will already be correctly tracking the client's state. + // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) + // The map's keys are names of xDS resources known to the xDS client. + // The map's values are opaque resource versions. + InitialResourceVersions map[string]string `protobuf:"bytes,5,rep,name=initial_resource_versions,json=initialResourceVersions,proto3" json:"initial_resource_versions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // When the DeltaDiscoveryRequest is a ACK or NACK message in response + // to a previous DeltaDiscoveryResponse, the response_nonce must be the + // nonce in the DeltaDiscoveryResponse. + // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. + ResponseNonce string `protobuf:"bytes,6,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The “message“ field in “error_details“ + // provides the Envoy internal exception related to the failure. + ErrorDetail *status.Status `protobuf:"bytes,7,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` +} + +func (x *DeltaDiscoveryRequest) Reset() { + *x = DeltaDiscoveryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeltaDiscoveryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeltaDiscoveryRequest) ProtoMessage() {} + +func (x *DeltaDiscoveryRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeltaDiscoveryRequest.ProtoReflect.Descriptor instead. 
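+
+// A minimal sketch of how a client might open a delta subscription and later
+// ACK a response (illustrative only; stream handling is omitted and the names
+// sub, resp, and ack are placeholders, not part of this package):
+//
+//	sub := &DeltaDiscoveryRequest{
+//		TypeUrl:                "type.googleapis.com/envoy.config.cluster.v3.Cluster",
+//		ResourceNamesSubscribe: []string{"cluster-a", "cluster-b"},
+//	}
+//	// Echoing the nonce ACKs the response; populating ErrorDetail in the same
+//	// message would turn it into a NACK instead.
+//	ack := &DeltaDiscoveryRequest{
+//		TypeUrl:       resp.GetTypeUrl(),
+//		ResponseNonce: resp.GetNonce(),
+//	}
+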
+func (*DeltaDiscoveryRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{4} +} + +func (x *DeltaDiscoveryRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DeltaDiscoveryRequest) GetResourceNamesSubscribe() []string { + if x != nil { + return x.ResourceNamesSubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceNamesUnsubscribe() []string { + if x != nil { + return x.ResourceNamesUnsubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceLocatorsSubscribe() []*ResourceLocator { + if x != nil { + return x.ResourceLocatorsSubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceLocatorsUnsubscribe() []*ResourceLocator { + if x != nil { + return x.ResourceLocatorsUnsubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetInitialResourceVersions() map[string]string { + if x != nil { + return x.InitialResourceVersions + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResponseNonce() string { + if x != nil { + return x.ResponseNonce + } + return "" +} + +func (x *DeltaDiscoveryRequest) GetErrorDetail() *status.Status { + if x != nil { + return x.ErrorDetail + } + return nil +} + +// [#next-free-field: 9] +type DeltaDiscoveryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the response data (used for debugging). + SystemVersionInfo string `protobuf:"bytes,1,opt,name=system_version_info,json=systemVersionInfo,proto3" json:"system_version_info,omitempty"` + // The response resources. These are typed resources, whose types must match + // the type_url field. + Resources []*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Resources names of resources that have be deleted and to be removed from the xDS Client. + // Removed resources for missing resources can be ignored. + RemovedResources []string `protobuf:"bytes,6,rep,name=removed_resources,json=removedResources,proto3" json:"removed_resources,omitempty"` + // Alternative to removed_resources that allows specifying which variant of + // a resource is being removed. This variant must be used for any resource + // for which dynamic parameter constraints were sent to the client. + RemovedResourceNames []*ResourceName `protobuf:"bytes,8,rep,name=removed_resource_names,json=removedResourceNames,proto3" json:"removed_resource_names,omitempty"` + // The nonce provides a way for DeltaDiscoveryRequests to uniquely + // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. + Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"` + // [#not-implemented-hide:] + // The control plane instance that sent the response. 
+ ControlPlane *v3.ControlPlane `protobuf:"bytes,7,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"` +} + +func (x *DeltaDiscoveryResponse) Reset() { + *x = DeltaDiscoveryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeltaDiscoveryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeltaDiscoveryResponse) ProtoMessage() {} + +func (x *DeltaDiscoveryResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeltaDiscoveryResponse.ProtoReflect.Descriptor instead. +func (*DeltaDiscoveryResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{5} +} + +func (x *DeltaDiscoveryResponse) GetSystemVersionInfo() string { + if x != nil { + return x.SystemVersionInfo + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetResources() []*Resource { + if x != nil { + return x.Resources + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetRemovedResources() []string { + if x != nil { + return x.RemovedResources + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetRemovedResourceNames() []*ResourceName { + if x != nil { + return x.RemovedResourceNames + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetControlPlane() *v3.ControlPlane { + if x != nil { + return x.ControlPlane + } + return nil +} + +// A set of dynamic parameter constraints associated with a variant of an individual xDS resource. +// These constraints determine whether the resource matches a subscription based on the set of +// dynamic parameters in the subscription, as specified in the +// :ref:`ResourceLocator.dynamic_parameters` +// field. This allows xDS implementations (clients, servers, and caching proxies) to determine +// which variant of a resource is appropriate for a given client. 
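+//
+// For example, a variant intended only for clients whose dynamic parameters
+// include the key "env" with the value "prod" could carry constraints such as
+// the following (a minimal sketch; the key and value are placeholders):
+//
+//	constraints := &DynamicParameterConstraints{
+//		Type: &DynamicParameterConstraints_Constraint{
+//			Constraint: &DynamicParameterConstraints_SingleConstraint{
+//				Key: "env",
+//				ConstraintType: &DynamicParameterConstraints_SingleConstraint_Value{
+//					Value: "prod",
+//				},
+//			},
+//		},
+//	}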
+type DynamicParameterConstraints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *DynamicParameterConstraints_Constraint + // *DynamicParameterConstraints_OrConstraints + // *DynamicParameterConstraints_AndConstraints + // *DynamicParameterConstraints_NotConstraints + Type isDynamicParameterConstraints_Type `protobuf_oneof:"type"` +} + +func (x *DynamicParameterConstraints) Reset() { + *x = DynamicParameterConstraints{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints) ProtoMessage() {} + +func (x *DynamicParameterConstraints) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints.ProtoReflect.Descriptor instead. +func (*DynamicParameterConstraints) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6} +} + +func (m *DynamicParameterConstraints) GetType() isDynamicParameterConstraints_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *DynamicParameterConstraints) GetConstraint() *DynamicParameterConstraints_SingleConstraint { + if x, ok := x.GetType().(*DynamicParameterConstraints_Constraint); ok { + return x.Constraint + } + return nil +} + +func (x *DynamicParameterConstraints) GetOrConstraints() *DynamicParameterConstraints_ConstraintList { + if x, ok := x.GetType().(*DynamicParameterConstraints_OrConstraints); ok { + return x.OrConstraints + } + return nil +} + +func (x *DynamicParameterConstraints) GetAndConstraints() *DynamicParameterConstraints_ConstraintList { + if x, ok := x.GetType().(*DynamicParameterConstraints_AndConstraints); ok { + return x.AndConstraints + } + return nil +} + +func (x *DynamicParameterConstraints) GetNotConstraints() *DynamicParameterConstraints { + if x, ok := x.GetType().(*DynamicParameterConstraints_NotConstraints); ok { + return x.NotConstraints + } + return nil +} + +type isDynamicParameterConstraints_Type interface { + isDynamicParameterConstraints_Type() +} + +type DynamicParameterConstraints_Constraint struct { + // A single constraint to evaluate. + Constraint *DynamicParameterConstraints_SingleConstraint `protobuf:"bytes,1,opt,name=constraint,proto3,oneof"` +} + +type DynamicParameterConstraints_OrConstraints struct { + // A list of constraints that match if any one constraint in the list + // matches. + OrConstraints *DynamicParameterConstraints_ConstraintList `protobuf:"bytes,2,opt,name=or_constraints,json=orConstraints,proto3,oneof"` +} + +type DynamicParameterConstraints_AndConstraints struct { + // A list of constraints that must all match. + AndConstraints *DynamicParameterConstraints_ConstraintList `protobuf:"bytes,3,opt,name=and_constraints,json=andConstraints,proto3,oneof"` +} + +type DynamicParameterConstraints_NotConstraints struct { + // The inverse (NOT) of a set of constraints. 
+ NotConstraints *DynamicParameterConstraints `protobuf:"bytes,4,opt,name=not_constraints,json=notConstraints,proto3,oneof"` +} + +func (*DynamicParameterConstraints_Constraint) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_OrConstraints) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_AndConstraints) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_NotConstraints) isDynamicParameterConstraints_Type() {} + +// [#next-free-field: 10] +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource's name, to distinguish it from others of the same type of resource. + // Only one of “name“ or “resource_name“ may be set. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Alternative to the “name“ field, to be used when the server supports + // multiple variants of the named resource that are differentiated by + // dynamic parameter constraints. + // Only one of “name“ or “resource_name“ may be set. + ResourceName *ResourceName `protobuf:"bytes,8,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The aliases are a list of other names that this resource can go by. + Aliases []string `protobuf:"bytes,4,rep,name=aliases,proto3" json:"aliases,omitempty"` + // The resource level version. It allows xDS to track the state of individual + // resources. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The resource being tracked. + Resource *anypb.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Time-to-live value for the resource. For each resource, a timer is started. The timer is + // reset each time the resource is received with a new TTL. If the resource is received with + // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the + // configuration for the resource will be removed. + // + // The TTL can be refreshed or changed by sending a response that doesn't change the resource + // version. In this case the resource field does not need to be populated, which allows for + // light-weight "heartbeat" updates to keep a resource with a TTL alive. + // + // The TTL feature is meant to support configurations that should be removed in the event of + // a management server failure. For example, the feature may be used for fault injection + // testing where the fault injection should be terminated in the event that Envoy loses contact + // with the management server. + Ttl *durationpb.Duration `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Cache control properties for the resource. + // [#not-implemented-hide:] + CacheControl *Resource_CacheControl `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // The Metadata field can be used to provide additional information for the resource. + // E.g. the trace data for debugging. 
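+	//
+	// For example, a TTL "heartbeat" that keeps an already-delivered resource
+	// alive without resending its body can leave the resource field unset
+	// (a minimal sketch; the name, version, and duration are placeholders):
+	//
+	//	heartbeat := &Resource{
+	//		Name:    "listener-a",
+	//		Version: currentVersion, // unchanged version, so no config update
+	//		Ttl:     durationpb.New(30 * time.Second),
+	//	}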
+ Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{7} +} + +func (x *Resource) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Resource) GetResourceName() *ResourceName { + if x != nil { + return x.ResourceName + } + return nil +} + +func (x *Resource) GetAliases() []string { + if x != nil { + return x.Aliases + } + return nil +} + +func (x *Resource) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Resource) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *Resource) GetCacheControl() *Resource_CacheControl { + if x != nil { + return x.CacheControl + } + return nil +} + +func (x *Resource) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// A single constraint for a given key. +type DynamicParameterConstraints_SingleConstraint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to match against. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Types that are assignable to ConstraintType: + // + // *DynamicParameterConstraints_SingleConstraint_Value + // *DynamicParameterConstraints_SingleConstraint_Exists_ + ConstraintType isDynamicParameterConstraints_SingleConstraint_ConstraintType `protobuf_oneof:"constraint_type"` +} + +func (x *DynamicParameterConstraints_SingleConstraint) Reset() { + *x = DynamicParameterConstraints_SingleConstraint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_SingleConstraint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_SingleConstraint) ProtoMessage() {} + +func (x *DynamicParameterConstraints_SingleConstraint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_SingleConstraint.ProtoReflect.Descriptor instead. 
+func (*DynamicParameterConstraints_SingleConstraint) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (m *DynamicParameterConstraints_SingleConstraint) GetConstraintType() isDynamicParameterConstraints_SingleConstraint_ConstraintType { + if m != nil { + return m.ConstraintType + } + return nil +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetValue() string { + if x, ok := x.GetConstraintType().(*DynamicParameterConstraints_SingleConstraint_Value); ok { + return x.Value + } + return "" +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetExists() *DynamicParameterConstraints_SingleConstraint_Exists { + if x, ok := x.GetConstraintType().(*DynamicParameterConstraints_SingleConstraint_Exists_); ok { + return x.Exists + } + return nil +} + +type isDynamicParameterConstraints_SingleConstraint_ConstraintType interface { + isDynamicParameterConstraints_SingleConstraint_ConstraintType() +} + +type DynamicParameterConstraints_SingleConstraint_Value struct { + // Matches this exact value. + Value string `protobuf:"bytes,2,opt,name=value,proto3,oneof"` +} + +type DynamicParameterConstraints_SingleConstraint_Exists_ struct { + // Key is present (matches any value except for the key being absent). + // This allows setting a default constraint for clients that do + // not send a key at all, while there may be other clients that need + // special configuration based on that key. + Exists *DynamicParameterConstraints_SingleConstraint_Exists `protobuf:"bytes,3,opt,name=exists,proto3,oneof"` +} + +func (*DynamicParameterConstraints_SingleConstraint_Value) isDynamicParameterConstraints_SingleConstraint_ConstraintType() { +} + +func (*DynamicParameterConstraints_SingleConstraint_Exists_) isDynamicParameterConstraints_SingleConstraint_ConstraintType() { +} + +type DynamicParameterConstraints_ConstraintList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Constraints []*DynamicParameterConstraints `protobuf:"bytes,1,rep,name=constraints,proto3" json:"constraints,omitempty"` +} + +func (x *DynamicParameterConstraints_ConstraintList) Reset() { + *x = DynamicParameterConstraints_ConstraintList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_ConstraintList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_ConstraintList) ProtoMessage() {} + +func (x *DynamicParameterConstraints_ConstraintList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_ConstraintList.ProtoReflect.Descriptor instead. 
+func (*DynamicParameterConstraints_ConstraintList) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *DynamicParameterConstraints_ConstraintList) GetConstraints() []*DynamicParameterConstraints { + if x != nil { + return x.Constraints + } + return nil +} + +type DynamicParameterConstraints_SingleConstraint_Exists struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) Reset() { + *x = DynamicParameterConstraints_SingleConstraint_Exists{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_SingleConstraint_Exists) ProtoMessage() {} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_SingleConstraint_Exists.ProtoReflect.Descriptor instead. +func (*DynamicParameterConstraints_SingleConstraint_Exists) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 0, 0} +} + +// Cache control properties for the resource. +// [#not-implemented-hide:] +type Resource_CacheControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, xDS proxies may not cache this resource. + // Note that this does not apply to clients other than xDS proxies, which must cache resources + // for their own use, regardless of the value of this field. + DoNotCache bool `protobuf:"varint,1,opt,name=do_not_cache,json=doNotCache,proto3" json:"do_not_cache,omitempty"` +} + +func (x *Resource_CacheControl) Reset() { + *x = Resource_CacheControl{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource_CacheControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource_CacheControl) ProtoMessage() {} + +func (x *Resource_CacheControl) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource_CacheControl.ProtoReflect.Descriptor instead. 
+func (*Resource_CacheControl) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *Resource_CacheControl) GetDoNotCache() bool { + if x != nil { + return x.DoNotCache + } + return false +} + +var File_envoy_service_discovery_v3_discovery_proto protoreflect.FileDescriptor + +var file_envoy_service_discovery_v3_discovery_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x71, 0x0a, 0x12, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x1a, 0x44, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9f, 0x01, 0x0a, 0x0c, 0x52, 
0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7b, + 0x0a, 0x1d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x1b, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x03, 0x0a, 0x10, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x11, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, + 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x24, 0x9a, + 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xa3, 0x02, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 
0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, + 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, + 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, + 0x6e, 0x65, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x06, 0x0a, 0x15, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x38, + 0x0a, 0x18, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x16, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x3c, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x75, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x55, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x6b, 0x0a, 0x1b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x19, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 
0x6f, 0x72, 0x73, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x12, 0x6f, 0x0a, 0x1d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x75, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x1b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x1a, + 0x4a, 0x0a, 0x1c, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, + 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x03, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x74, 0x61, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x42, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, + 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x5e, 0x0a, + 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x92, 0x06, 0x0a, 0x1b, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x0e, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 
0x74, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x71, 0x0a, 0x0f, 0x61, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, + 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x6e, 0x6f, + 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0xc9, 0x01, 0x0a, + 0x10, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x69, 0x0a, 0x06, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, + 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, + 0x42, 0x16, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x59, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xe4, 0x03, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, + 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x56, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x30, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x6f, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x4e, 0x6f, + 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x93, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_service_discovery_v3_discovery_proto_rawDescOnce sync.Once + file_envoy_service_discovery_v3_discovery_proto_rawDescData = file_envoy_service_discovery_v3_discovery_proto_rawDesc +) + +func file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP() []byte { + file_envoy_service_discovery_v3_discovery_proto_rawDescOnce.Do(func() { + file_envoy_service_discovery_v3_discovery_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_discovery_proto_rawDescData) + }) + return file_envoy_service_discovery_v3_discovery_proto_rawDescData +} + +var file_envoy_service_discovery_v3_discovery_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_envoy_service_discovery_v3_discovery_proto_goTypes = []interface{}{ + (*ResourceLocator)(nil), // 0: envoy.service.discovery.v3.ResourceLocator + (*ResourceName)(nil), // 1: envoy.service.discovery.v3.ResourceName + (*DiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DiscoveryRequest + (*DiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DiscoveryResponse + (*DeltaDiscoveryRequest)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryRequest + (*DeltaDiscoveryResponse)(nil), // 5: envoy.service.discovery.v3.DeltaDiscoveryResponse + (*DynamicParameterConstraints)(nil), // 6: envoy.service.discovery.v3.DynamicParameterConstraints + (*Resource)(nil), // 7: envoy.service.discovery.v3.Resource + nil, // 8: envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry + nil, // 9: envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry + (*DynamicParameterConstraints_SingleConstraint)(nil), // 10: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint + (*DynamicParameterConstraints_ConstraintList)(nil), // 11: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + (*DynamicParameterConstraints_SingleConstraint_Exists)(nil), // 12: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists + (*Resource_CacheControl)(nil), // 13: envoy.service.discovery.v3.Resource.CacheControl + (*v3.Node)(nil), // 14: envoy.config.core.v3.Node + (*status.Status)(nil), // 15: google.rpc.Status + (*anypb.Any)(nil), // 16: google.protobuf.Any + (*v3.ControlPlane)(nil), // 17: envoy.config.core.v3.ControlPlane + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*v3.Metadata)(nil), // 19: envoy.config.core.v3.Metadata +} +var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{ + 8, // 0: envoy.service.discovery.v3.ResourceLocator.dynamic_parameters:type_name -> envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry + 6, // 1: envoy.service.discovery.v3.ResourceName.dynamic_parameter_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 14, // 2: envoy.service.discovery.v3.DiscoveryRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 3: envoy.service.discovery.v3.DiscoveryRequest.resource_locators:type_name -> envoy.service.discovery.v3.ResourceLocator + 15, // 4: envoy.service.discovery.v3.DiscoveryRequest.error_detail:type_name -> google.rpc.Status + 16, // 5: envoy.service.discovery.v3.DiscoveryResponse.resources:type_name -> google.protobuf.Any + 17, // 6: 
envoy.service.discovery.v3.DiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane + 14, // 7: envoy.service.discovery.v3.DeltaDiscoveryRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 8: envoy.service.discovery.v3.DeltaDiscoveryRequest.resource_locators_subscribe:type_name -> envoy.service.discovery.v3.ResourceLocator + 0, // 9: envoy.service.discovery.v3.DeltaDiscoveryRequest.resource_locators_unsubscribe:type_name -> envoy.service.discovery.v3.ResourceLocator + 9, // 10: envoy.service.discovery.v3.DeltaDiscoveryRequest.initial_resource_versions:type_name -> envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry + 15, // 11: envoy.service.discovery.v3.DeltaDiscoveryRequest.error_detail:type_name -> google.rpc.Status + 7, // 12: envoy.service.discovery.v3.DeltaDiscoveryResponse.resources:type_name -> envoy.service.discovery.v3.Resource + 1, // 13: envoy.service.discovery.v3.DeltaDiscoveryResponse.removed_resource_names:type_name -> envoy.service.discovery.v3.ResourceName + 17, // 14: envoy.service.discovery.v3.DeltaDiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane + 10, // 15: envoy.service.discovery.v3.DynamicParameterConstraints.constraint:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint + 11, // 16: envoy.service.discovery.v3.DynamicParameterConstraints.or_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + 11, // 17: envoy.service.discovery.v3.DynamicParameterConstraints.and_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + 6, // 18: envoy.service.discovery.v3.DynamicParameterConstraints.not_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 1, // 19: envoy.service.discovery.v3.Resource.resource_name:type_name -> envoy.service.discovery.v3.ResourceName + 16, // 20: envoy.service.discovery.v3.Resource.resource:type_name -> google.protobuf.Any + 18, // 21: envoy.service.discovery.v3.Resource.ttl:type_name -> google.protobuf.Duration + 13, // 22: envoy.service.discovery.v3.Resource.cache_control:type_name -> envoy.service.discovery.v3.Resource.CacheControl + 19, // 23: envoy.service.discovery.v3.Resource.metadata:type_name -> envoy.config.core.v3.Metadata + 12, // 24: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.exists:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists + 6, // 25: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList.constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_envoy_service_discovery_v3_discovery_proto_init() } +func file_envoy_service_discovery_v3_discovery_proto_init() { + if File_envoy_service_discovery_v3_discovery_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_discovery_v3_discovery_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_service_discovery_v3_discovery_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiscoveryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiscoveryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeltaDiscoveryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeltaDiscoveryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_SingleConstraint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_ConstraintList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_SingleConstraint_Exists); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource_CacheControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*DynamicParameterConstraints_Constraint)(nil), + (*DynamicParameterConstraints_OrConstraints)(nil), + (*DynamicParameterConstraints_AndConstraints)(nil), + (*DynamicParameterConstraints_NotConstraints)(nil), + } + 
file_envoy_service_discovery_v3_discovery_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*DynamicParameterConstraints_SingleConstraint_Value)(nil), + (*DynamicParameterConstraints_SingleConstraint_Exists_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_discovery_v3_discovery_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_service_discovery_v3_discovery_proto_goTypes, + DependencyIndexes: file_envoy_service_discovery_v3_discovery_proto_depIdxs, + MessageInfos: file_envoy_service_discovery_v3_discovery_proto_msgTypes, + }.Build() + File_envoy_service_discovery_v3_discovery_proto = out.File + file_envoy_service_discovery_v3_discovery_proto_rawDesc = nil + file_envoy_service_discovery_v3_discovery_proto_goTypes = nil + file_envoy_service_discovery_v3_discovery_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go new file mode 100644 index 000000000..e30bb1e43 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go @@ -0,0 +1,2139 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceLocator with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocatorMultiError, or nil if none found. +func (m *ResourceLocator) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for DynamicParameters + + if len(errors) > 0 { + return ResourceLocatorMultiError(errors) + } + + return nil +} + +// ResourceLocatorMultiError is an error wrapping multiple validation errors +// returned by ResourceLocator.ValidateAll() if the designated constraints +// aren't met. +type ResourceLocatorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocatorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ResourceLocatorMultiError) AllErrors() []error { return m } + +// ResourceLocatorValidationError is the validation error returned by +// ResourceLocator.Validate if the designated constraints aren't met. +type ResourceLocatorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceLocatorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocatorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocatorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocatorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceLocatorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocatorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocatorValidationError{} + +// Validate checks the field values on ResourceName with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceNameMultiError, or +// nil if none found. +func (m *ResourceName) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetDynamicParameterConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicParameterConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceNameMultiError(errors) + } + + return nil +} + +// ResourceNameMultiError is an error wrapping multiple validation errors +// returned by ResourceName.ValidateAll() if the designated constraints aren't met. +type ResourceNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourceNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceNameMultiError) AllErrors() []error { return m } + +// ResourceNameValidationError is the validation error returned by +// ResourceName.Validate if the designated constraints aren't met. +type ResourceNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceNameValidationError{} + +// Validate checks the field values on DiscoveryRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DiscoveryRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DiscoveryRequestMultiError, or nil if none found. 
+func (m *DiscoveryRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DiscoveryRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetResourceLocators() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TypeUrl + + // no validation rules for ResponseNonce + + if all { + switch v := interface{}(m.GetErrorDetail()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DiscoveryRequestMultiError(errors) + } + + return nil +} + +// DiscoveryRequestMultiError is an error wrapping multiple validation errors +// returned by DiscoveryRequest.ValidateAll() if the designated constraints +// aren't met. +type DiscoveryRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DiscoveryRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m DiscoveryRequestMultiError) AllErrors() []error { return m } + +// DiscoveryRequestValidationError is the validation error returned by +// DiscoveryRequest.Validate if the designated constraints aren't met. +type DiscoveryRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DiscoveryRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DiscoveryRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DiscoveryRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DiscoveryRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DiscoveryRequestValidationError) ErrorName() string { return "DiscoveryRequestValidationError" } + +// Error satisfies the builtin error interface +func (e DiscoveryRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDiscoveryRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DiscoveryRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DiscoveryRequestValidationError{} + +// Validate checks the field values on DiscoveryResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DiscoveryResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DiscoveryResponseMultiError, or nil if none found. 
+func (m *DiscoveryResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DiscoveryResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Canary + + // no validation rules for TypeUrl + + // no validation rules for Nonce + + if all { + switch v := interface{}(m.GetControlPlane()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DiscoveryResponseMultiError(errors) + } + + return nil +} + +// DiscoveryResponseMultiError is an error wrapping multiple validation errors +// returned by DiscoveryResponse.ValidateAll() if the designated constraints +// aren't met. +type DiscoveryResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DiscoveryResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DiscoveryResponseMultiError) AllErrors() []error { return m } + +// DiscoveryResponseValidationError is the validation error returned by +// DiscoveryResponse.Validate if the designated constraints aren't met. +type DiscoveryResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DiscoveryResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DiscoveryResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DiscoveryResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DiscoveryResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DiscoveryResponseValidationError) ErrorName() string { + return "DiscoveryResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DiscoveryResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDiscoveryResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DiscoveryResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DiscoveryResponseValidationError{} + +// Validate checks the field values on DeltaDiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeltaDiscoveryRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeltaDiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeltaDiscoveryRequestMultiError, or nil if none found. +func (m *DeltaDiscoveryRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeltaDiscoveryRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TypeUrl + + for idx, item := range m.GetResourceLocatorsSubscribe() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResourceLocatorsUnsubscribe() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for InitialResourceVersions + + // no validation rules for ResponseNonce + + if all { + switch v := interface{}(m.GetErrorDetail()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DeltaDiscoveryRequestMultiError(errors) + } + + return nil +} + +// DeltaDiscoveryRequestMultiError is an error wrapping multiple validation +// errors returned by DeltaDiscoveryRequest.ValidateAll() if the designated +// constraints aren't met. +type DeltaDiscoveryRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeltaDiscoveryRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeltaDiscoveryRequestMultiError) AllErrors() []error { return m } + +// DeltaDiscoveryRequestValidationError is the validation error returned by +// DeltaDiscoveryRequest.Validate if the designated constraints aren't met. +type DeltaDiscoveryRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeltaDiscoveryRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeltaDiscoveryRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeltaDiscoveryRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeltaDiscoveryRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DeltaDiscoveryRequestValidationError) ErrorName() string { + return "DeltaDiscoveryRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeltaDiscoveryRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeltaDiscoveryRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeltaDiscoveryRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeltaDiscoveryRequestValidationError{} + +// Validate checks the field values on DeltaDiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeltaDiscoveryResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeltaDiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeltaDiscoveryResponseMultiError, or nil if none found. +func (m *DeltaDiscoveryResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DeltaDiscoveryResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SystemVersionInfo + + for idx, item := range m.GetResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TypeUrl + + for idx, item := range m.GetRemovedResourceNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Nonce + + if all { + switch v 
:= interface{}(m.GetControlPlane()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DeltaDiscoveryResponseMultiError(errors) + } + + return nil +} + +// DeltaDiscoveryResponseMultiError is an error wrapping multiple validation +// errors returned by DeltaDiscoveryResponse.ValidateAll() if the designated +// constraints aren't met. +type DeltaDiscoveryResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeltaDiscoveryResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeltaDiscoveryResponseMultiError) AllErrors() []error { return m } + +// DeltaDiscoveryResponseValidationError is the validation error returned by +// DeltaDiscoveryResponse.Validate if the designated constraints aren't met. +type DeltaDiscoveryResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeltaDiscoveryResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeltaDiscoveryResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeltaDiscoveryResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeltaDiscoveryResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeltaDiscoveryResponseValidationError) ErrorName() string { + return "DeltaDiscoveryResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DeltaDiscoveryResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeltaDiscoveryResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeltaDiscoveryResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeltaDiscoveryResponseValidationError{} + +// Validate checks the field values on DynamicParameterConstraints with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DynamicParameterConstraints with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// DynamicParameterConstraintsMultiError, or nil if none found. +func (m *DynamicParameterConstraints) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Type.(type) { + case *DynamicParameterConstraints_Constraint: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConstraint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConstraint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_OrConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOrConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_AndConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAndConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_NotConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetNotConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return DynamicParameterConstraintsMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraintsMultiError is an error wrapping multiple +// validation errors returned by DynamicParameterConstraints.ValidateAll() if +// the designated constraints aren't met. +type DynamicParameterConstraintsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraintsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraintsMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraintsValidationError is the validation error returned +// by DynamicParameterConstraints.Validate if the designated constraints +// aren't met. +type DynamicParameterConstraintsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraintsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraintsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraintsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraintsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicParameterConstraintsValidationError) ErrorName() string { + return "DynamicParameterConstraintsValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraintsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraintsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraintsValidationError{} + +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. +func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetResourceName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTtl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed 
validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTtl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCacheControl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCacheControl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_SingleConstraint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_SingleConstraint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_SingleConstraint with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// DynamicParameterConstraints_SingleConstraintMultiError, or nil if none found. +func (m *DynamicParameterConstraints_SingleConstraint) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_SingleConstraint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + oneofConstraintTypePresent := false + switch v := m.ConstraintType.(type) { + case *DynamicParameterConstraints_SingleConstraint_Value: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true + // no validation rules for Value + case *DynamicParameterConstraints_SingleConstraint_Exists_: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true + + if all { + switch v := interface{}(m.GetExists()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExists()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConstraintTypePresent { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return 
DynamicParameterConstraints_SingleConstraintMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_SingleConstraintMultiError is an error wrapping +// multiple validation errors returned by +// DynamicParameterConstraints_SingleConstraint.ValidateAll() if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraintMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_SingleConstraintMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_SingleConstraintMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_SingleConstraintValidationError is the +// validation error returned by +// DynamicParameterConstraints_SingleConstraint.Validate if the designated +// constraints aren't met. +type DynamicParameterConstraints_SingleConstraintValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DynamicParameterConstraints_SingleConstraintValidationError) ErrorName() string { + return "DynamicParameterConstraints_SingleConstraintValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_SingleConstraintValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_SingleConstraint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_SingleConstraintValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_SingleConstraintValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_ConstraintList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_ConstraintList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_ConstraintList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// DynamicParameterConstraints_ConstraintListMultiError, or nil if none found. 
+func (m *DynamicParameterConstraints_ConstraintList) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_ConstraintList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConstraints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return DynamicParameterConstraints_ConstraintListMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_ConstraintListMultiError is an error wrapping +// multiple validation errors returned by +// DynamicParameterConstraints_ConstraintList.ValidateAll() if the designated +// constraints aren't met. +type DynamicParameterConstraints_ConstraintListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_ConstraintListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_ConstraintListMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_ConstraintListValidationError is the validation +// error returned by DynamicParameterConstraints_ConstraintList.Validate if +// the designated constraints aren't met. +type DynamicParameterConstraints_ConstraintListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicParameterConstraints_ConstraintListValidationError) ErrorName() string { + return "DynamicParameterConstraints_ConstraintListValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_ConstraintListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_ConstraintList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_ConstraintListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_ConstraintListValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_SingleConstraint_Exists with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_SingleConstraint_Exists) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_SingleConstraint_Exists with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// DynamicParameterConstraints_SingleConstraint_ExistsMultiError, or nil if +// none found. +func (m *DynamicParameterConstraints_SingleConstraint_Exists) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DynamicParameterConstraints_SingleConstraint_ExistsMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_SingleConstraint_ExistsMultiError is an error +// wrapping multiple validation errors returned by +// DynamicParameterConstraints_SingleConstraint_Exists.ValidateAll() if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraint_ExistsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_SingleConstraint_ExistsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_SingleConstraint_ExistsMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_SingleConstraint_ExistsValidationError is the +// validation error returned by +// DynamicParameterConstraints_SingleConstraint_Exists.Validate if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraint_ExistsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. 
+func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) ErrorName() string { + return "DynamicParameterConstraints_SingleConstraint_ExistsValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_SingleConstraint_Exists.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_SingleConstraint_ExistsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_SingleConstraint_ExistsValidationError{} + +// Validate checks the field values on Resource_CacheControl with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Resource_CacheControl) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource_CacheControl with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Resource_CacheControlMultiError, or nil if none found. +func (m *Resource_CacheControl) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource_CacheControl) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DoNotCache + + if len(errors) > 0 { + return Resource_CacheControlMultiError(errors) + } + + return nil +} + +// Resource_CacheControlMultiError is an error wrapping multiple validation +// errors returned by Resource_CacheControl.ValidateAll() if the designated +// constraints aren't met. +type Resource_CacheControlMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Resource_CacheControlMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Resource_CacheControlMultiError) AllErrors() []error { return m } + +// Resource_CacheControlValidationError is the validation error returned by +// Resource_CacheControl.Validate if the designated constraints aren't met. +type Resource_CacheControlValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Resource_CacheControlValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Resource_CacheControlValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Resource_CacheControlValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Resource_CacheControlValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Resource_CacheControlValidationError) ErrorName() string { + return "Resource_CacheControlValidationError" +} + +// Error satisfies the builtin error interface +func (e Resource_CacheControlValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource_CacheControl.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Resource_CacheControlValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Resource_CacheControlValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go new file mode 100644 index 000000000..56a3ef579 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go @@ -0,0 +1,1546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceLocator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLocator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceLocator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DynamicParameterConstraints != nil { + size, err := m.DynamicParameterConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocators) > 0 { + for iNdEx := len(m.ResourceLocators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.ErrorDetail != 
nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if m.Canary { + i-- + if m.Canary { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Resources[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsUnsubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsSubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsSubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ErrorDetail != nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialResourceVersions) > 0 { + for k := range m.InitialResourceVersions { + v := m.InitialResourceVersions[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for iNdEx := len(m.ResourceNamesUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesUnsubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesUnsubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesUnsubscribe[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceNamesSubscribe) > 0 
{ + for iNdEx := len(m.ResourceNamesSubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesSubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesSubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesSubscribe[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemovedResourceNames) > 0 { + for iNdEx := len(m.RemovedResourceNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RemovedResourceNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.RemovedResources) > 0 { + for iNdEx := len(m.RemovedResources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemovedResources[iNdEx]) + copy(dAtA[i:], m.RemovedResources[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RemovedResources[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resources[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if 
len(m.SystemVersionInfo) > 0 { + i -= len(m.SystemVersionInfo) + copy(dAtA[i:], m.SystemVersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SystemVersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Exists_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Value); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exists != nil { + size, err := m.Exists.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 
0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_ConstraintList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Constraints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*DynamicParameterConstraints_NotConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_AndConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_OrConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_Constraint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Constraint != nil { + size, err := m.Constraint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_OrConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_OrConstraints) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrConstraints != nil { + size, err := m.OrConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_AndConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_AndConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndConstraints != nil { + size, err := m.AndConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_NotConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_NotConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotConstraints != nil { + size, err := m.NotConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Resource_CacheControl) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_CacheControl) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource_CacheControl) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DoNotCache { + i-- + if m.DoNotCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Resource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ResourceName != nil { + size, err := m.ResourceName.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.CacheControl != nil { + size, err := m.CacheControl.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Aliases) > 0 { + for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Aliases[iNdEx]) + copy(dAtA[i:], m.Aliases[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Aliases[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Resource != nil { + size, err := (*anypb.Any)(m.Resource).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceLocator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicParameterConstraints != nil { + l = m.DynamicParameterConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() 
+ } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocators) > 0 { + for _, e := range m.ResourceLocators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Canary { + n += 2 + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNamesSubscribe) > 0 { + for _, s := range m.ResourceNamesSubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for _, s := range m.ResourceNamesUnsubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InitialResourceVersions) > 0 { + for k, v := range m.InitialResourceVersions { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for _, e := range m.ResourceLocatorsSubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for _, e := range m.ResourceLocatorsUnsubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SystemVersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResources) > 0 { + for _, s := range m.RemovedResources { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResourceNames) > 0 { + for _, e := range m.RemovedResourceNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConstraintType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists != nil { + l = m.Exists.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_ConstraintList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_Constraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Constraint != nil { + l = m.Constraint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_OrConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrConstraints != nil { + l = m.OrConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_AndConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndConstraints != nil { + l = m.AndConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_NotConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotConstraints != nil { + l = m.NotConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Resource_CacheControl) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoNotCache { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Resource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Resource != nil { + l = 
(*anypb.Any)(m.Resource).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CacheControl != nil { + l = m.CacheControl.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceName != nil { + l = m.ResourceName.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go new file mode 100644 index 000000000..6b47a93c6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go @@ -0,0 +1,325 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A load report Envoy sends to the management server. +type LoadStatsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node identifier for Envoy instance. + Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of load stats to report. + ClusterStats []*v31.ClusterStats `protobuf:"bytes,2,rep,name=cluster_stats,json=clusterStats,proto3" json:"cluster_stats,omitempty"` +} + +func (x *LoadStatsRequest) Reset() { + *x = LoadStatsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadStatsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadStatsRequest) ProtoMessage() {} + +func (x *LoadStatsRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadStatsRequest.ProtoReflect.Descriptor instead. 
+func (*LoadStatsRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP(), []int{0} +} + +func (x *LoadStatsRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *LoadStatsRequest) GetClusterStats() []*v31.ClusterStats { + if x != nil { + return x.ClusterStats + } + return nil +} + +// The management server sends envoy a LoadStatsResponse with all clusters it +// is interested in learning load stats about. +type LoadStatsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clusters to report stats for. + // Not populated if “send_all_clusters“ is true. + Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + SendAllClusters bool `protobuf:"varint,4,opt,name=send_all_clusters,json=sendAllClusters,proto3" json:"send_all_clusters,omitempty"` + // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // + // 1. There may be some delay from when the timer fires until stats sampling occurs. + // 2. For clusters that were already feature in the previous “LoadStatsResponse“, any traffic + // that is observed in between the corresponding previous “LoadStatsRequest“ and this + // “LoadStatsResponse“ will also be accumulated and billed to the cluster. This avoids a period + // of inobservability that might otherwise exists between the messages. New clusters are not + // subject to this consideration. + LoadReportingInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=load_reporting_interval,json=loadReportingInterval,proto3" json:"load_reporting_interval,omitempty"` + // Set to “true“ if the management server supports endpoint granularity + // report. + ReportEndpointGranularity bool `protobuf:"varint,3,opt,name=report_endpoint_granularity,json=reportEndpointGranularity,proto3" json:"report_endpoint_granularity,omitempty"` +} + +func (x *LoadStatsResponse) Reset() { + *x = LoadStatsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadStatsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadStatsResponse) ProtoMessage() {} + +func (x *LoadStatsResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadStatsResponse.ProtoReflect.Descriptor instead. 
+func (*LoadStatsResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP(), []int{1} +} + +func (x *LoadStatsResponse) GetClusters() []string { + if x != nil { + return x.Clusters + } + return nil +} + +func (x *LoadStatsResponse) GetSendAllClusters() bool { + if x != nil { + return x.SendAllClusters + } + return false +} + +func (x *LoadStatsResponse) GetLoadReportingInterval() *durationpb.Duration { + if x != nil { + return x.LoadReportingInterval + } + return nil +} + +func (x *LoadStatsResponse) GetReportEndpointGranularity() bool { + if x != nil { + return x.ReportEndpointGranularity + } + return false +} + +var File_envoy_service_load_stats_v3_lrs_proto protoreflect.FileDescriptor + +var file_envoy_service_load_stats_v3_lrs_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x72, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x01, 0x0a, 0x10, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa4, 0x02, 0x0a, 0x11, 
0x4c, + 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x11, + 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x51, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x1b, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x19, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x34, 0x9a, 0xc5, 0x88, + 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x32, 0x8e, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x76, 0x0a, 0x0f, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x29, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x4c, 0x72, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x73, 
0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_load_stats_v3_lrs_proto_rawDescOnce sync.Once + file_envoy_service_load_stats_v3_lrs_proto_rawDescData = file_envoy_service_load_stats_v3_lrs_proto_rawDesc +) + +func file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP() []byte { + file_envoy_service_load_stats_v3_lrs_proto_rawDescOnce.Do(func() { + file_envoy_service_load_stats_v3_lrs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_load_stats_v3_lrs_proto_rawDescData) + }) + return file_envoy_service_load_stats_v3_lrs_proto_rawDescData +} + +var file_envoy_service_load_stats_v3_lrs_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_service_load_stats_v3_lrs_proto_goTypes = []interface{}{ + (*LoadStatsRequest)(nil), // 0: envoy.service.load_stats.v3.LoadStatsRequest + (*LoadStatsResponse)(nil), // 1: envoy.service.load_stats.v3.LoadStatsResponse + (*v3.Node)(nil), // 2: envoy.config.core.v3.Node + (*v31.ClusterStats)(nil), // 3: envoy.config.endpoint.v3.ClusterStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration +} +var file_envoy_service_load_stats_v3_lrs_proto_depIdxs = []int32{ + 2, // 0: envoy.service.load_stats.v3.LoadStatsRequest.node:type_name -> envoy.config.core.v3.Node + 3, // 1: envoy.service.load_stats.v3.LoadStatsRequest.cluster_stats:type_name -> envoy.config.endpoint.v3.ClusterStats + 4, // 2: envoy.service.load_stats.v3.LoadStatsResponse.load_reporting_interval:type_name -> google.protobuf.Duration + 0, // 3: envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats:input_type -> envoy.service.load_stats.v3.LoadStatsRequest + 1, // 4: envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats:output_type -> envoy.service.load_stats.v3.LoadStatsResponse + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_service_load_stats_v3_lrs_proto_init() } +func file_envoy_service_load_stats_v3_lrs_proto_init() { + if File_envoy_service_load_stats_v3_lrs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadStatsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadStatsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_load_stats_v3_lrs_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_load_stats_v3_lrs_proto_goTypes, + DependencyIndexes: file_envoy_service_load_stats_v3_lrs_proto_depIdxs, + MessageInfos: file_envoy_service_load_stats_v3_lrs_proto_msgTypes, + }.Build() + File_envoy_service_load_stats_v3_lrs_proto = out.File + file_envoy_service_load_stats_v3_lrs_proto_rawDesc = nil + file_envoy_service_load_stats_v3_lrs_proto_goTypes = nil + 
file_envoy_service_load_stats_v3_lrs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go new file mode 100644 index 000000000..cf4e395c2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go @@ -0,0 +1,335 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LoadStatsRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LoadStatsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadStatsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadStatsRequestMultiError, or nil if none found. +func (m *LoadStatsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadStatsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetClusterStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + 
+ if len(errors) > 0 { + return LoadStatsRequestMultiError(errors) + } + + return nil +} + +// LoadStatsRequestMultiError is an error wrapping multiple validation errors +// returned by LoadStatsRequest.ValidateAll() if the designated constraints +// aren't met. +type LoadStatsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadStatsRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadStatsRequestMultiError) AllErrors() []error { return m } + +// LoadStatsRequestValidationError is the validation error returned by +// LoadStatsRequest.Validate if the designated constraints aren't met. +type LoadStatsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadStatsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadStatsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadStatsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadStatsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadStatsRequestValidationError) ErrorName() string { return "LoadStatsRequestValidationError" } + +// Error satisfies the builtin error interface +func (e LoadStatsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadStatsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadStatsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadStatsRequestValidationError{} + +// Validate checks the field values on LoadStatsResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LoadStatsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadStatsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadStatsResponseMultiError, or nil if none found. 
+func (m *LoadStatsResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadStatsResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SendAllClusters + + if all { + switch v := interface{}(m.GetLoadReportingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadReportingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ReportEndpointGranularity + + if len(errors) > 0 { + return LoadStatsResponseMultiError(errors) + } + + return nil +} + +// LoadStatsResponseMultiError is an error wrapping multiple validation errors +// returned by LoadStatsResponse.ValidateAll() if the designated constraints +// aren't met. +type LoadStatsResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadStatsResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadStatsResponseMultiError) AllErrors() []error { return m } + +// LoadStatsResponseValidationError is the validation error returned by +// LoadStatsResponse.Validate if the designated constraints aren't met. +type LoadStatsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadStatsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadStatsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadStatsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadStatsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LoadStatsResponseValidationError) ErrorName() string { + return "LoadStatsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadStatsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadStatsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadStatsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadStatsResponseValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go new file mode 100644 index 000000000..4eb34c173 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + LoadReportingService_StreamLoadStats_FullMethodName = "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats" +) + +// LoadReportingServiceClient is the client API for LoadReportingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadReportingServiceClient interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. 
The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. + StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) +} + +type loadReportingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadReportingServiceClient(cc grpc.ClientConnInterface) LoadReportingServiceClient { + return &loadReportingServiceClient{cc} +} + +func (c *loadReportingServiceClient) StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) { + stream, err := c.cc.NewStream(ctx, &LoadReportingService_ServiceDesc.Streams[0], LoadReportingService_StreamLoadStats_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &loadReportingServiceStreamLoadStatsClient{stream} + return x, nil +} + +type LoadReportingService_StreamLoadStatsClient interface { + Send(*LoadStatsRequest) error + Recv() (*LoadStatsResponse, error) + grpc.ClientStream +} + +type loadReportingServiceStreamLoadStatsClient struct { + grpc.ClientStream +} + +func (x *loadReportingServiceStreamLoadStatsClient) Send(m *LoadStatsRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsClient) Recv() (*LoadStatsResponse, error) { + m := new(LoadStatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingServiceServer is the server API for LoadReportingService service. +// All implementations should embed UnimplementedLoadReportingServiceServer +// for forward compatibility +type LoadReportingServiceServer interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. 
+ StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error +} + +// UnimplementedLoadReportingServiceServer should be embedded to have forward compatible implementations. +type UnimplementedLoadReportingServiceServer struct { +} + +func (UnimplementedLoadReportingServiceServer) StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamLoadStats not implemented") +} + +// UnsafeLoadReportingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoadReportingServiceServer will +// result in compilation errors. +type UnsafeLoadReportingServiceServer interface { + mustEmbedUnimplementedLoadReportingServiceServer() +} + +func RegisterLoadReportingServiceServer(s grpc.ServiceRegistrar, srv LoadReportingServiceServer) { + s.RegisterService(&LoadReportingService_ServiceDesc, srv) +} + +func _LoadReportingService_StreamLoadStats_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadReportingServiceServer).StreamLoadStats(&loadReportingServiceStreamLoadStatsServer{stream}) +} + +type LoadReportingService_StreamLoadStatsServer interface { + Send(*LoadStatsResponse) error + Recv() (*LoadStatsRequest, error) + grpc.ServerStream +} + +type loadReportingServiceStreamLoadStatsServer struct { + grpc.ServerStream +} + +func (x *loadReportingServiceStreamLoadStatsServer) Send(m *LoadStatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsServer) Recv() (*LoadStatsRequest, error) { + m := new(LoadStatsRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingService_ServiceDesc is the grpc.ServiceDesc for LoadReportingService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LoadReportingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.load_stats.v3.LoadReportingService", + HandlerType: (*LoadReportingServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamLoadStats", + Handler: _LoadReportingService_StreamLoadStats_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/load_stats/v3/lrs.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go new file mode 100644 index 000000000..ca86ae774 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go @@ -0,0 +1,230 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LoadStatsRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStats) > 0 { + for iNdEx := len(m.ClusterStats) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ClusterStats[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterStats[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SendAllClusters { + i-- + if m.SendAllClusters { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ReportEndpointGranularity { + i-- + if m.ReportEndpointGranularity { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.LoadReportingInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsRequest) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterStats) > 0 { + for _, e := range m.ClusterStats { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LoadStatsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadReportingInterval != nil { + l = (*durationpb.Duration)(m.LoadReportingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportEndpointGranularity { + n += 2 + } + if m.SendAllClusters { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go new file mode 100644 index 000000000..4635ca028 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go @@ -0,0 +1,985 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Status of a config from a management server view. +type ConfigStatus int32 + +const ( + // Status info is not available/unknown. + ConfigStatus_UNKNOWN ConfigStatus = 0 + // Management server has sent the config to client and received ACK. + ConfigStatus_SYNCED ConfigStatus = 1 + // Config is not sent. + ConfigStatus_NOT_SENT ConfigStatus = 2 + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + ConfigStatus_STALE ConfigStatus = 3 + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. + ConfigStatus_ERROR ConfigStatus = 4 +) + +// Enum value maps for ConfigStatus. 
+var ( + ConfigStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SYNCED", + 2: "NOT_SENT", + 3: "STALE", + 4: "ERROR", + } + ConfigStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SYNCED": 1, + "NOT_SENT": 2, + "STALE": 3, + "ERROR": 4, + } +) + +func (x ConfigStatus) Enum() *ConfigStatus { + p := new(ConfigStatus) + *p = x + return p +} + +func (x ConfigStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConfigStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_service_status_v3_csds_proto_enumTypes[0].Descriptor() +} + +func (ConfigStatus) Type() protoreflect.EnumType { + return &file_envoy_service_status_v3_csds_proto_enumTypes[0] +} + +func (x ConfigStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConfigStatus.Descriptor instead. +func (ConfigStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{0} +} + +// Config status from a client-side view. +type ClientConfigStatus int32 + +const ( + // Config status is not available/unknown. + ClientConfigStatus_CLIENT_UNKNOWN ClientConfigStatus = 0 + // Client requested the config but hasn't received any config from management + // server yet. + ClientConfigStatus_CLIENT_REQUESTED ClientConfigStatus = 1 + // Client received the config and replied with ACK. + ClientConfigStatus_CLIENT_ACKED ClientConfigStatus = 2 + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. + ClientConfigStatus_CLIENT_NACKED ClientConfigStatus = 3 +) + +// Enum value maps for ClientConfigStatus. +var ( + ClientConfigStatus_name = map[int32]string{ + 0: "CLIENT_UNKNOWN", + 1: "CLIENT_REQUESTED", + 2: "CLIENT_ACKED", + 3: "CLIENT_NACKED", + } + ClientConfigStatus_value = map[string]int32{ + "CLIENT_UNKNOWN": 0, + "CLIENT_REQUESTED": 1, + "CLIENT_ACKED": 2, + "CLIENT_NACKED": 3, + } +) + +func (x ClientConfigStatus) Enum() *ClientConfigStatus { + p := new(ClientConfigStatus) + *p = x + return p +} + +func (x ClientConfigStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientConfigStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_service_status_v3_csds_proto_enumTypes[1].Descriptor() +} + +func (ClientConfigStatus) Type() protoreflect.EnumType { + return &file_envoy_service_status_v3_csds_proto_enumTypes[1] +} + +func (x ClientConfigStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientConfigStatus.Descriptor instead. +func (ClientConfigStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{1} +} + +// Request for client status of clients identified by a list of NodeMatchers. +type ClientStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. + NodeMatchers []*v3.NodeMatcher `protobuf:"bytes,1,rep,name=node_matchers,json=nodeMatchers,proto3" json:"node_matchers,omitempty"` + // The node making the csds request. 
+ Node *v31.Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + // If true, the server will not include the resource contents in the response + // (i.e., the generic_xds_configs.xds_config field will not be populated). + // [#not-implemented-hide:] + ExcludeResourceContents bool `protobuf:"varint,3,opt,name=exclude_resource_contents,json=excludeResourceContents,proto3" json:"exclude_resource_contents,omitempty"` +} + +func (x *ClientStatusRequest) Reset() { + *x = ClientStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatusRequest) ProtoMessage() {} + +func (x *ClientStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatusRequest.ProtoReflect.Descriptor instead. +func (*ClientStatusRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientStatusRequest) GetNodeMatchers() []*v3.NodeMatcher { + if x != nil { + return x.NodeMatchers + } + return nil +} + +func (x *ClientStatusRequest) GetNode() *v31.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *ClientStatusRequest) GetExcludeResourceContents() bool { + if x != nil { + return x.ExcludeResourceContents + } + return false +} + +// Detailed config (per xDS) with status. +// [#next-free-field: 8] +type PerXdsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + Status ConfigStatus `protobuf:"varint,1,opt,name=status,proto3,enum=envoy.service.status.v3.ConfigStatus" json:"status,omitempty"` + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. + // + // .. attention:: + // + // This field is deprecated. Use :ref:`ClientResourceStatus + // ` for per-resource + // config status instead. + // + // Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. 
+ ClientStatus ClientConfigStatus `protobuf:"varint,7,opt,name=client_status,json=clientStatus,proto3,enum=envoy.service.status.v3.ClientConfigStatus" json:"client_status,omitempty"` + // Types that are assignable to PerXdsConfig: + // + // *PerXdsConfig_ListenerConfig + // *PerXdsConfig_ClusterConfig + // *PerXdsConfig_RouteConfig + // *PerXdsConfig_ScopedRouteConfig + // *PerXdsConfig_EndpointConfig + PerXdsConfig isPerXdsConfig_PerXdsConfig `protobuf_oneof:"per_xds_config"` +} + +func (x *PerXdsConfig) Reset() { + *x = PerXdsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PerXdsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PerXdsConfig) ProtoMessage() {} + +func (x *PerXdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PerXdsConfig.ProtoReflect.Descriptor instead. +func (*PerXdsConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{1} +} + +func (x *PerXdsConfig) GetStatus() ConfigStatus { + if x != nil { + return x.Status + } + return ConfigStatus_UNKNOWN +} + +// Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. +func (x *PerXdsConfig) GetClientStatus() ClientConfigStatus { + if x != nil { + return x.ClientStatus + } + return ClientConfigStatus_CLIENT_UNKNOWN +} + +func (m *PerXdsConfig) GetPerXdsConfig() isPerXdsConfig_PerXdsConfig { + if m != nil { + return m.PerXdsConfig + } + return nil +} + +func (x *PerXdsConfig) GetListenerConfig() *v32.ListenersConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ListenerConfig); ok { + return x.ListenerConfig + } + return nil +} + +func (x *PerXdsConfig) GetClusterConfig() *v32.ClustersConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ClusterConfig); ok { + return x.ClusterConfig + } + return nil +} + +func (x *PerXdsConfig) GetRouteConfig() *v32.RoutesConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_RouteConfig); ok { + return x.RouteConfig + } + return nil +} + +func (x *PerXdsConfig) GetScopedRouteConfig() *v32.ScopedRoutesConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ScopedRouteConfig); ok { + return x.ScopedRouteConfig + } + return nil +} + +func (x *PerXdsConfig) GetEndpointConfig() *v32.EndpointsConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_EndpointConfig); ok { + return x.EndpointConfig + } + return nil +} + +type isPerXdsConfig_PerXdsConfig interface { + isPerXdsConfig_PerXdsConfig() +} + +type PerXdsConfig_ListenerConfig struct { + ListenerConfig *v32.ListenersConfigDump `protobuf:"bytes,2,opt,name=listener_config,json=listenerConfig,proto3,oneof"` +} + +type PerXdsConfig_ClusterConfig struct { + ClusterConfig *v32.ClustersConfigDump `protobuf:"bytes,3,opt,name=cluster_config,json=clusterConfig,proto3,oneof"` +} + +type PerXdsConfig_RouteConfig struct { + RouteConfig *v32.RoutesConfigDump `protobuf:"bytes,4,opt,name=route_config,json=routeConfig,proto3,oneof"` +} + +type PerXdsConfig_ScopedRouteConfig struct { + ScopedRouteConfig *v32.ScopedRoutesConfigDump 
`protobuf:"bytes,5,opt,name=scoped_route_config,json=scopedRouteConfig,proto3,oneof"` +} + +type PerXdsConfig_EndpointConfig struct { + EndpointConfig *v32.EndpointsConfigDump `protobuf:"bytes,6,opt,name=endpoint_config,json=endpointConfig,proto3,oneof"` +} + +func (*PerXdsConfig_ListenerConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_ClusterConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_RouteConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_ScopedRouteConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_EndpointConfig) isPerXdsConfig_PerXdsConfig() {} + +// All xds configs for a particular client. +type ClientConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node for a particular client. + Node *v31.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // This field is deprecated in favor of generic_xds_configs which is + // much simpler and uniform in structure. + // + // Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. + XdsConfig []*PerXdsConfig `protobuf:"bytes,2,rep,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + // Represents generic xDS config and the exact config structure depends on + // the type URL (like Cluster if it is CDS) + GenericXdsConfigs []*ClientConfig_GenericXdsConfig `protobuf:"bytes,3,rep,name=generic_xds_configs,json=genericXdsConfigs,proto3" json:"generic_xds_configs,omitempty"` + // For xDS clients, the scope in which the data is used. + // For example, gRPC indicates the data plane target or that the data is + // associated with gRPC server(s). + ClientScope string `protobuf:"bytes,4,opt,name=client_scope,json=clientScope,proto3" json:"client_scope,omitempty"` +} + +func (x *ClientConfig) Reset() { + *x = ClientConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig) ProtoMessage() {} + +func (x *ClientConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientConfig) GetNode() *v31.Node { + if x != nil { + return x.Node + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. +func (x *ClientConfig) GetXdsConfig() []*PerXdsConfig { + if x != nil { + return x.XdsConfig + } + return nil +} + +func (x *ClientConfig) GetGenericXdsConfigs() []*ClientConfig_GenericXdsConfig { + if x != nil { + return x.GenericXdsConfigs + } + return nil +} + +func (x *ClientConfig) GetClientScope() string { + if x != nil { + return x.ClientScope + } + return "" +} + +type ClientStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Client configs for the clients specified in the ClientStatusRequest. 
+ Config []*ClientConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` +} + +func (x *ClientStatusResponse) Reset() { + *x = ClientStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatusResponse) ProtoMessage() {} + +func (x *ClientStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatusResponse.ProtoReflect.Descriptor instead. +func (*ClientStatusResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientStatusResponse) GetConfig() []*ClientConfig { + if x != nil { + return x.Config + } + return nil +} + +// GenericXdsConfig is used to specify the config status and the dump +// of any xDS resource identified by their type URL. It is the generalized +// version of the now deprecated ListenersConfigDump, ClustersConfigDump etc +// [#next-free-field: 10] +type ClientConfig_GenericXdsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type_url represents the fully qualified name of xDS resource type + // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Name of the xDS resource + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // This is the :ref:`version_info ` + // in the last processed xDS discovery response. If there are only + // static bootstrap listeners, this field will be "" + VersionInfo string `protobuf:"bytes,3,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The xDS resource config. Actual content depends on the type + XdsConfig *anypb.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + // Timestamp when the xDS resource was last updated + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Per xDS resource config status. It is generated by management servers. + // It will not be present if the CSDS server is an xDS client. + ConfigStatus ConfigStatus `protobuf:"varint,6,opt,name=config_status,json=configStatus,proto3,enum=envoy.service.status.v3.ConfigStatus" json:"config_status,omitempty"` + // Per xDS resource status from the view of a xDS client + ClientStatus v32.ClientResourceStatus `protobuf:"varint,7,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` + // Set if the last update failed, cleared after the next successful + // update. The *error_state* field contains the rejected version of + // this particular resource along with the reason and timestamp. For + // successfully updated or acknowledged resource, this field should + // be empty. 
+ // [#not-implemented-hide:] + ErrorState *v32.UpdateFailureState `protobuf:"bytes,8,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // Is static resource is true if it is specified in the config supplied + // through the file at the startup. + IsStaticResource bool `protobuf:"varint,9,opt,name=is_static_resource,json=isStaticResource,proto3" json:"is_static_resource,omitempty"` +} + +func (x *ClientConfig_GenericXdsConfig) Reset() { + *x = ClientConfig_GenericXdsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig_GenericXdsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig_GenericXdsConfig) ProtoMessage() {} + +func (x *ClientConfig_GenericXdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig_GenericXdsConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig_GenericXdsConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClientConfig_GenericXdsConfig) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *anypb.Any { + if x != nil { + return x.XdsConfig + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetConfigStatus() ConfigStatus { + if x != nil { + return x.ConfigStatus + } + return ConfigStatus_UNKNOWN +} + +func (x *ClientConfig_GenericXdsConfig) GetClientStatus() v32.ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return v32.ClientResourceStatus(0) +} + +func (x *ClientConfig_GenericXdsConfig) GetErrorState() *v32.UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetIsStaticResource() bool { + if x != nil { + return x.IsStaticResource + } + return false +} + +var File_envoy_service_status_v3_csds_proto protoreflect.FileDescriptor + +var file_envoy_service_status_v3_csds_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x73, 0x64, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 
0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6e, + 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf9, 0x04, 0x0a, 0x0c, + 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 
0x5d, 0x0a, 0x0d, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, + 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, + 0x0a, 0x13, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, + 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x5f, 0x78, 0x64, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xae, 0x06, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x51, 0x0a, 0x0a, 0x78, 0x64, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, + 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x66, 0x0a, 0x13, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, + 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, + 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, + 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 
0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, + 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0x04, 0x2a, 0x63, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, 0x4b, + 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4e, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03, 0x32, 0xb2, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0x98, 0x01, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, + 0x1b, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x85, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, + 0x43, 0x73, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_status_v3_csds_proto_rawDescOnce sync.Once + file_envoy_service_status_v3_csds_proto_rawDescData = file_envoy_service_status_v3_csds_proto_rawDesc +) + +func file_envoy_service_status_v3_csds_proto_rawDescGZIP() []byte { + file_envoy_service_status_v3_csds_proto_rawDescOnce.Do(func() { + file_envoy_service_status_v3_csds_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_status_v3_csds_proto_rawDescData) + }) + return file_envoy_service_status_v3_csds_proto_rawDescData +} + +var file_envoy_service_status_v3_csds_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_service_status_v3_csds_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_service_status_v3_csds_proto_goTypes = []interface{}{ + (ConfigStatus)(0), // 0: envoy.service.status.v3.ConfigStatus + (ClientConfigStatus)(0), // 1: envoy.service.status.v3.ClientConfigStatus + (*ClientStatusRequest)(nil), // 2: envoy.service.status.v3.ClientStatusRequest + (*PerXdsConfig)(nil), // 3: envoy.service.status.v3.PerXdsConfig + 
(*ClientConfig)(nil), // 4: envoy.service.status.v3.ClientConfig + (*ClientStatusResponse)(nil), // 5: envoy.service.status.v3.ClientStatusResponse + (*ClientConfig_GenericXdsConfig)(nil), // 6: envoy.service.status.v3.ClientConfig.GenericXdsConfig + (*v3.NodeMatcher)(nil), // 7: envoy.type.matcher.v3.NodeMatcher + (*v31.Node)(nil), // 8: envoy.config.core.v3.Node + (*v32.ListenersConfigDump)(nil), // 9: envoy.admin.v3.ListenersConfigDump + (*v32.ClustersConfigDump)(nil), // 10: envoy.admin.v3.ClustersConfigDump + (*v32.RoutesConfigDump)(nil), // 11: envoy.admin.v3.RoutesConfigDump + (*v32.ScopedRoutesConfigDump)(nil), // 12: envoy.admin.v3.ScopedRoutesConfigDump + (*v32.EndpointsConfigDump)(nil), // 13: envoy.admin.v3.EndpointsConfigDump + (*anypb.Any)(nil), // 14: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (v32.ClientResourceStatus)(0), // 16: envoy.admin.v3.ClientResourceStatus + (*v32.UpdateFailureState)(nil), // 17: envoy.admin.v3.UpdateFailureState +} +var file_envoy_service_status_v3_csds_proto_depIdxs = []int32{ + 7, // 0: envoy.service.status.v3.ClientStatusRequest.node_matchers:type_name -> envoy.type.matcher.v3.NodeMatcher + 8, // 1: envoy.service.status.v3.ClientStatusRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 2: envoy.service.status.v3.PerXdsConfig.status:type_name -> envoy.service.status.v3.ConfigStatus + 1, // 3: envoy.service.status.v3.PerXdsConfig.client_status:type_name -> envoy.service.status.v3.ClientConfigStatus + 9, // 4: envoy.service.status.v3.PerXdsConfig.listener_config:type_name -> envoy.admin.v3.ListenersConfigDump + 10, // 5: envoy.service.status.v3.PerXdsConfig.cluster_config:type_name -> envoy.admin.v3.ClustersConfigDump + 11, // 6: envoy.service.status.v3.PerXdsConfig.route_config:type_name -> envoy.admin.v3.RoutesConfigDump + 12, // 7: envoy.service.status.v3.PerXdsConfig.scoped_route_config:type_name -> envoy.admin.v3.ScopedRoutesConfigDump + 13, // 8: envoy.service.status.v3.PerXdsConfig.endpoint_config:type_name -> envoy.admin.v3.EndpointsConfigDump + 8, // 9: envoy.service.status.v3.ClientConfig.node:type_name -> envoy.config.core.v3.Node + 3, // 10: envoy.service.status.v3.ClientConfig.xds_config:type_name -> envoy.service.status.v3.PerXdsConfig + 6, // 11: envoy.service.status.v3.ClientConfig.generic_xds_configs:type_name -> envoy.service.status.v3.ClientConfig.GenericXdsConfig + 4, // 12: envoy.service.status.v3.ClientStatusResponse.config:type_name -> envoy.service.status.v3.ClientConfig + 14, // 13: envoy.service.status.v3.ClientConfig.GenericXdsConfig.xds_config:type_name -> google.protobuf.Any + 15, // 14: envoy.service.status.v3.ClientConfig.GenericXdsConfig.last_updated:type_name -> google.protobuf.Timestamp + 0, // 15: envoy.service.status.v3.ClientConfig.GenericXdsConfig.config_status:type_name -> envoy.service.status.v3.ConfigStatus + 16, // 16: envoy.service.status.v3.ClientConfig.GenericXdsConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 17, // 17: envoy.service.status.v3.ClientConfig.GenericXdsConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 2, // 18: envoy.service.status.v3.ClientStatusDiscoveryService.StreamClientStatus:input_type -> envoy.service.status.v3.ClientStatusRequest + 2, // 19: envoy.service.status.v3.ClientStatusDiscoveryService.FetchClientStatus:input_type -> envoy.service.status.v3.ClientStatusRequest + 5, // 20: envoy.service.status.v3.ClientStatusDiscoveryService.StreamClientStatus:output_type -> 
envoy.service.status.v3.ClientStatusResponse + 5, // 21: envoy.service.status.v3.ClientStatusDiscoveryService.FetchClientStatus:output_type -> envoy.service.status.v3.ClientStatusResponse + 20, // [20:22] is the sub-list for method output_type + 18, // [18:20] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_envoy_service_status_v3_csds_proto_init() } +func file_envoy_service_status_v3_csds_proto_init() { + if File_envoy_service_status_v3_csds_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_status_v3_csds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PerXdsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig_GenericXdsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PerXdsConfig_ListenerConfig)(nil), + (*PerXdsConfig_ClusterConfig)(nil), + (*PerXdsConfig_RouteConfig)(nil), + (*PerXdsConfig_ScopedRouteConfig)(nil), + (*PerXdsConfig_EndpointConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_status_v3_csds_proto_rawDesc, + NumEnums: 2, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_status_v3_csds_proto_goTypes, + DependencyIndexes: file_envoy_service_status_v3_csds_proto_depIdxs, + EnumInfos: file_envoy_service_status_v3_csds_proto_enumTypes, + MessageInfos: file_envoy_service_status_v3_csds_proto_msgTypes, + }.Build() + File_envoy_service_status_v3_csds_proto = out.File + file_envoy_service_status_v3_csds_proto_rawDesc = nil + file_envoy_service_status_v3_csds_proto_goTypes = nil + file_envoy_service_status_v3_csds_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go new file mode 100644 index 000000000..d27eee646 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go @@ -0,0 +1,1057 @@ +//go:build !disable_pgv +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.ClientResourceStatus(0) +) + +// Validate checks the field values on ClientStatusRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientStatusRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientStatusRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientStatusRequestMultiError, or nil if none found. +func (m *ClientStatusRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientStatusRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetNodeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ExcludeResourceContents + + if len(errors) > 0 { + return ClientStatusRequestMultiError(errors) + } + + return nil +} + +// ClientStatusRequestMultiError is an error wrapping multiple validation +// errors returned by ClientStatusRequest.ValidateAll() if the designated +// constraints aren't met. 
+type ClientStatusRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientStatusRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientStatusRequestMultiError) AllErrors() []error { return m } + +// ClientStatusRequestValidationError is the validation error returned by +// ClientStatusRequest.Validate if the designated constraints aren't met. +type ClientStatusRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientStatusRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientStatusRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientStatusRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientStatusRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientStatusRequestValidationError) ErrorName() string { + return "ClientStatusRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientStatusRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientStatusRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientStatusRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientStatusRequestValidationError{} + +// Validate checks the field values on PerXdsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PerXdsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PerXdsConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PerXdsConfigMultiError, or +// nil if none found. 
+func (m *PerXdsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *PerXdsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Status + + // no validation rules for ClientStatus + + switch v := m.PerXdsConfig.(type) { + case *PerXdsConfig_ListenerConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetListenerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_ClusterConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetClusterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_RouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_ScopedRouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopedRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_EndpointConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PerXdsConfigMultiError(errors) + } + + return nil +} + +// PerXdsConfigMultiError is an error wrapping multiple validation errors +// returned by PerXdsConfig.ValidateAll() if the designated constraints aren't met. +type PerXdsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PerXdsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PerXdsConfigMultiError) AllErrors() []error { return m } + +// PerXdsConfigValidationError is the validation error returned by +// PerXdsConfig.Validate if the designated constraints aren't met. +type PerXdsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PerXdsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PerXdsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PerXdsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PerXdsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PerXdsConfigValidationError) ErrorName() string { return "PerXdsConfigValidationError" } + +// Error satisfies the builtin error interface +func (e PerXdsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPerXdsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PerXdsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PerXdsConfigValidationError{} + +// Validate checks the field values on ClientConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClientConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClientConfigMultiError, or +// nil if none found. +func (m *ClientConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetXdsConfig() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetGenericXdsConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for ClientScope + + if len(errors) > 0 { + return ClientConfigMultiError(errors) + } + + return nil +} + +// ClientConfigMultiError is an error wrapping multiple validation errors +// returned by ClientConfig.ValidateAll() if the designated constraints aren't met. +type ClientConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfigMultiError) AllErrors() []error { return m } + +// ClientConfigValidationError is the validation error returned by +// ClientConfig.Validate if the designated constraints aren't met. +type ClientConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfigValidationError) ErrorName() string { return "ClientConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClientConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfigValidationError{} + +// Validate checks the field values on ClientStatusResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientStatusResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientStatusResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientStatusResponseMultiError, or nil if none found. 
+func (m *ClientStatusResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientStatusResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConfig() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClientStatusResponseMultiError(errors) + } + + return nil +} + +// ClientStatusResponseMultiError is an error wrapping multiple validation +// errors returned by ClientStatusResponse.ValidateAll() if the designated +// constraints aren't met. +type ClientStatusResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientStatusResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientStatusResponseMultiError) AllErrors() []error { return m } + +// ClientStatusResponseValidationError is the validation error returned by +// ClientStatusResponse.Validate if the designated constraints aren't met. +type ClientStatusResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientStatusResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientStatusResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientStatusResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientStatusResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientStatusResponseValidationError) ErrorName() string { + return "ClientStatusResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientStatusResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientStatusResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientStatusResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientStatusResponseValidationError{} + +// Validate checks the field values on ClientConfig_GenericXdsConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ClientConfig_GenericXdsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig_GenericXdsConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClientConfig_GenericXdsConfigMultiError, or nil if none found. +func (m *ClientConfig_GenericXdsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig_GenericXdsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + // no validation rules for Name + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetXdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConfigStatus + + // no validation rules for ClientStatus + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsStaticResource + + if len(errors) > 0 { + return ClientConfig_GenericXdsConfigMultiError(errors) + } + + return nil +} + +// ClientConfig_GenericXdsConfigMultiError is an error 
wrapping multiple +// validation errors returned by ClientConfig_GenericXdsConfig.ValidateAll() +// if the designated constraints aren't met. +type ClientConfig_GenericXdsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfig_GenericXdsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfig_GenericXdsConfigMultiError) AllErrors() []error { return m } + +// ClientConfig_GenericXdsConfigValidationError is the validation error +// returned by ClientConfig_GenericXdsConfig.Validate if the designated +// constraints aren't met. +type ClientConfig_GenericXdsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfig_GenericXdsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfig_GenericXdsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfig_GenericXdsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfig_GenericXdsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfig_GenericXdsConfigValidationError) ErrorName() string { + return "ClientConfig_GenericXdsConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientConfig_GenericXdsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig_GenericXdsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfig_GenericXdsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfig_GenericXdsConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go new file mode 100644 index 000000000..abe9abebd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ClientStatusDiscoveryService_StreamClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus" + ClientStatusDiscoveryService_FetchClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus" +) + +// ClientStatusDiscoveryServiceClient is the client API for ClientStatusDiscoveryService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ClientStatusDiscoveryServiceClient interface { + StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) + FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) +} + +type clientStatusDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewClientStatusDiscoveryServiceClient(cc grpc.ClientConnInterface) ClientStatusDiscoveryServiceClient { + return &clientStatusDiscoveryServiceClient{cc} +} + +func (c *clientStatusDiscoveryServiceClient) StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) { + stream, err := c.cc.NewStream(ctx, &ClientStatusDiscoveryService_ServiceDesc.Streams[0], ClientStatusDiscoveryService_StreamClientStatus_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &clientStatusDiscoveryServiceStreamClientStatusClient{stream} + return x, nil +} + +type ClientStatusDiscoveryService_StreamClientStatusClient interface { + Send(*ClientStatusRequest) error + Recv() (*ClientStatusResponse, error) + grpc.ClientStream +} + +type clientStatusDiscoveryServiceStreamClientStatusClient struct { + grpc.ClientStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Send(m *ClientStatusRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Recv() (*ClientStatusResponse, error) { + m := new(ClientStatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *clientStatusDiscoveryServiceClient) FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) { + out := new(ClientStatusResponse) + err := c.cc.Invoke(ctx, ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClientStatusDiscoveryServiceServer is the server API for ClientStatusDiscoveryService service. +// All implementations should embed UnimplementedClientStatusDiscoveryServiceServer +// for forward compatibility +type ClientStatusDiscoveryServiceServer interface { + StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error + FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) +} + +// UnimplementedClientStatusDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedClientStatusDiscoveryServiceServer struct { +} + +func (UnimplementedClientStatusDiscoveryServiceServer) StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error { + return status.Errorf(codes.Unimplemented, "method StreamClientStatus not implemented") +} +func (UnimplementedClientStatusDiscoveryServiceServer) FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchClientStatus not implemented") +} + +// UnsafeClientStatusDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to ClientStatusDiscoveryServiceServer will +// result in compilation errors. +type UnsafeClientStatusDiscoveryServiceServer interface { + mustEmbedUnimplementedClientStatusDiscoveryServiceServer() +} + +func RegisterClientStatusDiscoveryServiceServer(s grpc.ServiceRegistrar, srv ClientStatusDiscoveryServiceServer) { + s.RegisterService(&ClientStatusDiscoveryService_ServiceDesc, srv) +} + +func _ClientStatusDiscoveryService_StreamClientStatus_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ClientStatusDiscoveryServiceServer).StreamClientStatus(&clientStatusDiscoveryServiceStreamClientStatusServer{stream}) +} + +type ClientStatusDiscoveryService_StreamClientStatusServer interface { + Send(*ClientStatusResponse) error + Recv() (*ClientStatusRequest, error) + grpc.ServerStream +} + +type clientStatusDiscoveryServiceStreamClientStatusServer struct { + grpc.ServerStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Send(m *ClientStatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Recv() (*ClientStatusRequest, error) { + m := new(ClientStatusRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ClientStatusDiscoveryService_FetchClientStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClientStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, req.(*ClientStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ClientStatusDiscoveryService_ServiceDesc is the grpc.ServiceDesc for ClientStatusDiscoveryService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ClientStatusDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.status.v3.ClientStatusDiscoveryService", + HandlerType: (*ClientStatusDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchClientStatus", + Handler: _ClientStatusDiscoveryService_FetchClientStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamClientStatus", + Handler: _ClientStatusDiscoveryService_StreamClientStatus_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/status/v3/csds.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go new file mode 100644 index 000000000..a55983e81 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go @@ -0,0 +1,866 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientStatusRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ExcludeResourceContents { + i-- + if m.ExcludeResourceContents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.NodeMatchers) > 0 { + for iNdEx := len(m.NodeMatchers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.NodeMatchers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.NodeMatchers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PerXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if msg, ok := 
m.PerXdsConfig.(*PerXdsConfig_EndpointConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ScopedRouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ClusterConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ListenerConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListenerConfig != nil { + if vtmsg, ok := interface{}(m.ListenerConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterConfig != nil { + if vtmsg, ok := interface{}(m.ClusterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if 
err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfig != nil { + if vtmsg, ok := interface{}(m.ScopedRouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_EndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_EndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndpointConfig != nil { + if vtmsg, ok := interface{}(m.EndpointConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ClientConfig_GenericXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsStaticResource { + i-- + if m.IsStaticResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.ErrorState != nil { + if vtmsg, ok := interface{}(m.ErrorState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if m.ConfigStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ConfigStatus)) + i-- + dAtA[i] = 0x30 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.XdsConfig != nil { + size, err := (*anypb.Any)(m.XdsConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClientScope) > 0 { + i -= len(m.ClientScope) + copy(dAtA[i:], m.ClientScope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientScope))) + i-- + dAtA[i] = 0x22 + } + if len(m.GenericXdsConfigs) > 0 { + for iNdEx := len(m.GenericXdsConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GenericXdsConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.XdsConfig) > 0 { + for iNdEx := len(m.XdsConfig) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.XdsConfig[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Config[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeMatchers) > 0 { + for _, e := range m.NodeMatchers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExcludeResourceContents { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if vtmsg, ok := m.PerXdsConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig_ListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListenerConfig != nil { + if size, ok := interface{}(m.ListenerConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterConfig != nil { + if size, ok := interface{}(m.ClusterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClusterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ScopedRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfig != nil { + if size, ok := interface{}(m.ScopedRouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_EndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.EndpointConfig != nil { + if size, ok := interface{}(m.EndpointConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ClientConfig_GenericXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfig != nil { + l = (*anypb.Any)(m.XdsConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConfigStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ConfigStatus)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + if m.ErrorState != nil { + if size, ok := interface{}(m.ErrorState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsStaticResource { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.XdsConfig) > 0 { + for _, e := range m.XdsConfig { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.GenericXdsConfigs) > 0 { + for _, e := range m.GenericXdsConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClientScope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go new file mode 100644 index 000000000..8afb4e8d1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Cookie defines an API for obtaining or generating HTTP cookie. +type Cookie struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name that will be used to obtain cookie value from downstream HTTP request or generate + // new cookie for downstream. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Duration of cookie. This will be used to set the expiry time of a new cookie when it is + // generated. Set this to 0s to use a session cookie and disable cookie expiration. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Path of cookie. This will be used to set the path of a new cookie when it is generated. + // If no path is specified here, no path will be set for the cookie. + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Cookie) Reset() { + *x = Cookie{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_cookie_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cookie) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cookie) ProtoMessage() {} + +func (x *Cookie) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_cookie_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cookie.ProtoReflect.Descriptor instead. +func (*Cookie) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_cookie_proto_rawDescGZIP(), []int{0} +} + +func (x *Cookie) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cookie) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *Cookie) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_envoy_type_http_v3_cookie_proto protoreflect.FileDescriptor + +var file_envoy_type_http_v3_cookie_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, + 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, + 0x7b, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, + 0x74, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_http_v3_cookie_proto_rawDescOnce sync.Once + file_envoy_type_http_v3_cookie_proto_rawDescData = file_envoy_type_http_v3_cookie_proto_rawDesc +) + +func file_envoy_type_http_v3_cookie_proto_rawDescGZIP() []byte { + file_envoy_type_http_v3_cookie_proto_rawDescOnce.Do(func() { + file_envoy_type_http_v3_cookie_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_v3_cookie_proto_rawDescData) + }) + return file_envoy_type_http_v3_cookie_proto_rawDescData +} + +var file_envoy_type_http_v3_cookie_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_http_v3_cookie_proto_goTypes = []interface{}{ + (*Cookie)(nil), // 0: envoy.type.http.v3.Cookie + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_type_http_v3_cookie_proto_depIdxs = []int32{ + 1, // 0: envoy.type.http.v3.Cookie.ttl:type_name -> google.protobuf.Duration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_http_v3_cookie_proto_init() } +func file_envoy_type_http_v3_cookie_proto_init() { + if File_envoy_type_http_v3_cookie_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_http_v3_cookie_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cookie); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_http_v3_cookie_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_http_v3_cookie_proto_goTypes, + DependencyIndexes: file_envoy_type_http_v3_cookie_proto_depIdxs, + MessageInfos: file_envoy_type_http_v3_cookie_proto_msgTypes, + }.Build() + File_envoy_type_http_v3_cookie_proto = out.File + file_envoy_type_http_v3_cookie_proto_rawDesc = nil + file_envoy_type_http_v3_cookie_proto_goTypes = nil + file_envoy_type_http_v3_cookie_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go new file mode 100644 index 000000000..3daecd3de --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go @@ -0,0 +1,178 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Cookie with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cookie) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cookie with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in CookieMultiError, or nil if none found. +func (m *Cookie) ValidateAll() error { + return m.validate(true) +} + +func (m *Cookie) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CookieValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTtl(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = CookieValidationError{ + field: "Ttl", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := CookieValidationError{ + field: "Ttl", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for Path + + if len(errors) > 0 { + return CookieMultiError(errors) + } + + return nil +} + +// CookieMultiError is an error wrapping multiple validation errors returned by +// Cookie.ValidateAll() if the designated constraints aren't met. +type CookieMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CookieMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CookieMultiError) AllErrors() []error { return m } + +// CookieValidationError is the validation error returned by Cookie.Validate if +// the designated constraints aren't met. +type CookieValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CookieValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CookieValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e CookieValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CookieValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CookieValidationError) ErrorName() string { return "CookieValidationError" } + +// Error satisfies the builtin error interface +func (e CookieValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCookie.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CookieValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CookieValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go new file mode 100644 index 000000000..66ab8b784 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go @@ -0,0 +1,99 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go new file mode 100644 index 000000000..dda21b56b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go @@ -0,0 +1,403 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PathTransformation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of operations to apply. Transformations will be performed in the order that they appear. + Operations []*PathTransformation_Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` +} + +func (x *PathTransformation) Reset() { + *x = PathTransformation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation) ProtoMessage() {} + +func (x *PathTransformation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation.ProtoReflect.Descriptor instead. +func (*PathTransformation) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0} +} + +func (x *PathTransformation) GetOperations() []*PathTransformation_Operation { + if x != nil { + return x.Operations + } + return nil +} + +// A type of operation to alter text. 
+type PathTransformation_Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OperationSpecifier: + // + // *PathTransformation_Operation_NormalizePathRfc_3986 + // *PathTransformation_Operation_MergeSlashes_ + OperationSpecifier isPathTransformation_Operation_OperationSpecifier `protobuf_oneof:"operation_specifier"` +} + +func (x *PathTransformation_Operation) Reset() { + *x = PathTransformation_Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation) ProtoMessage() {} + +func (x *PathTransformation_Operation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation.ProtoReflect.Descriptor instead. +func (*PathTransformation_Operation) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *PathTransformation_Operation) GetOperationSpecifier() isPathTransformation_Operation_OperationSpecifier { + if m != nil { + return m.OperationSpecifier + } + return nil +} + +func (x *PathTransformation_Operation) GetNormalizePathRfc_3986() *PathTransformation_Operation_NormalizePathRFC3986 { + if x, ok := x.GetOperationSpecifier().(*PathTransformation_Operation_NormalizePathRfc_3986); ok { + return x.NormalizePathRfc_3986 + } + return nil +} + +func (x *PathTransformation_Operation) GetMergeSlashes() *PathTransformation_Operation_MergeSlashes { + if x, ok := x.GetOperationSpecifier().(*PathTransformation_Operation_MergeSlashes_); ok { + return x.MergeSlashes + } + return nil +} + +type isPathTransformation_Operation_OperationSpecifier interface { + isPathTransformation_Operation_OperationSpecifier() +} + +type PathTransformation_Operation_NormalizePathRfc_3986 struct { + // Enable path normalization per RFC 3986. + NormalizePathRfc_3986 *PathTransformation_Operation_NormalizePathRFC3986 `protobuf:"bytes,2,opt,name=normalize_path_rfc_3986,json=normalizePathRfc3986,proto3,oneof"` +} + +type PathTransformation_Operation_MergeSlashes_ struct { + // Enable merging adjacent slashes. + MergeSlashes *PathTransformation_Operation_MergeSlashes `protobuf:"bytes,3,opt,name=merge_slashes,json=mergeSlashes,proto3,oneof"` +} + +func (*PathTransformation_Operation_NormalizePathRfc_3986) isPathTransformation_Operation_OperationSpecifier() { +} + +func (*PathTransformation_Operation_MergeSlashes_) isPathTransformation_Operation_OperationSpecifier() { +} + +// Should text be normalized according to RFC 3986? This typically is used for path headers +// before any processing of requests by HTTP filters or routing. This applies percent-encoded +// normalization and path segment normalization. Fails on characters disallowed in URLs +// (e.g. NULLs). See `Normalization and Comparison +// `_ for details of normalization. 
Note that +// this options does not perform `case normalization +// `_ +type PathTransformation_Operation_NormalizePathRFC3986 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) Reset() { + *x = PathTransformation_Operation_NormalizePathRFC3986{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation_NormalizePathRFC3986) ProtoMessage() {} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation_NormalizePathRFC3986.ProtoReflect.Descriptor instead. +func (*PathTransformation_Operation_NormalizePathRFC3986) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// Determines if adjacent slashes are merged into one. A common use case is for a request path +// header. Using this option in “:ref: PathNormalizationOptions +// “ +// will allow incoming requests with path “//dir///file“ to match against route with “prefix“ +// match set to “/dir“. When using for header transformations, note that slash merging is not +// part of `HTTP spec `_ and is provided for convenience. +type PathTransformation_Operation_MergeSlashes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PathTransformation_Operation_MergeSlashes) Reset() { + *x = PathTransformation_Operation_MergeSlashes{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation_MergeSlashes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation_MergeSlashes) ProtoMessage() {} + +func (x *PathTransformation_Operation_MergeSlashes) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation_MergeSlashes.ProtoReflect.Descriptor instead. 
+func (*PathTransformation_Operation_MergeSlashes) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0, 1} +} + +var File_envoy_type_http_v3_path_transformation_proto protoreflect.FileDescriptor + +var file_envoy_type_http_v3_path_transformation_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x03, 0x0a, 0x12, 0x50, + 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x50, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0xb5, 0x02, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x7e, 0x0a, 0x17, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x72, 0x66, 0x63, 0x5f, 0x33, 0x39, 0x38, 0x36, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x52, 0x46, 0x43, 0x33, 0x39, 0x38, 0x36, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x66, 0x63, 0x33, 0x39, 0x38, + 0x36, 0x12, 0x64, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x1a, 0x16, 0x0a, 0x14, 0x4e, 0x6f, 0x72, 0x6d, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x46, 0x43, 0x33, 0x39, 0x38, 0x36, 0x1a, + 0x0e, 0x0a, 0x0c, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x42, + 0x1a, 0x0a, 0x13, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x87, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x68, + 0x74, 0x74, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_http_v3_path_transformation_proto_rawDescOnce sync.Once + file_envoy_type_http_v3_path_transformation_proto_rawDescData = file_envoy_type_http_v3_path_transformation_proto_rawDesc +) + +func file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP() []byte { + file_envoy_type_http_v3_path_transformation_proto_rawDescOnce.Do(func() { + file_envoy_type_http_v3_path_transformation_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_v3_path_transformation_proto_rawDescData) + }) + return file_envoy_type_http_v3_path_transformation_proto_rawDescData +} + +var file_envoy_type_http_v3_path_transformation_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_type_http_v3_path_transformation_proto_goTypes = []interface{}{ + (*PathTransformation)(nil), // 0: envoy.type.http.v3.PathTransformation + (*PathTransformation_Operation)(nil), // 1: envoy.type.http.v3.PathTransformation.Operation + (*PathTransformation_Operation_NormalizePathRFC3986)(nil), // 2: envoy.type.http.v3.PathTransformation.Operation.NormalizePathRFC3986 + (*PathTransformation_Operation_MergeSlashes)(nil), // 3: envoy.type.http.v3.PathTransformation.Operation.MergeSlashes +} +var file_envoy_type_http_v3_path_transformation_proto_depIdxs = []int32{ + 1, // 0: envoy.type.http.v3.PathTransformation.operations:type_name -> envoy.type.http.v3.PathTransformation.Operation + 2, // 1: envoy.type.http.v3.PathTransformation.Operation.normalize_path_rfc_3986:type_name -> envoy.type.http.v3.PathTransformation.Operation.NormalizePathRFC3986 + 3, // 2: envoy.type.http.v3.PathTransformation.Operation.merge_slashes:type_name -> envoy.type.http.v3.PathTransformation.Operation.MergeSlashes + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_http_v3_path_transformation_proto_init() } +func file_envoy_type_http_v3_path_transformation_proto_init() { + if File_envoy_type_http_v3_path_transformation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_http_v3_path_transformation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_type_http_v3_path_transformation_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation_NormalizePathRFC3986); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation_MergeSlashes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PathTransformation_Operation_NormalizePathRfc_3986)(nil), + (*PathTransformation_Operation_MergeSlashes_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_http_v3_path_transformation_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_http_v3_path_transformation_proto_goTypes, + DependencyIndexes: file_envoy_type_http_v3_path_transformation_proto_depIdxs, + MessageInfos: file_envoy_type_http_v3_path_transformation_proto_msgTypes, + }.Build() + File_envoy_type_http_v3_path_transformation_proto = out.File + file_envoy_type_http_v3_path_transformation_proto_rawDesc = nil + file_envoy_type_http_v3_path_transformation_proto_goTypes = nil + file_envoy_type_http_v3_path_transformation_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go new file mode 100644 index 000000000..0bb35065b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go @@ -0,0 +1,595 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PathTransformation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PathTransformation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathTransformation with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// PathTransformationMultiError, or nil if none found. +func (m *PathTransformation) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetOperations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return PathTransformationMultiError(errors) + } + + return nil +} + +// PathTransformationMultiError is an error wrapping multiple validation errors +// returned by PathTransformation.ValidateAll() if the designated constraints +// aren't met. +type PathTransformationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformationMultiError) AllErrors() []error { return m } + +// PathTransformationValidationError is the validation error returned by +// PathTransformation.Validate if the designated constraints aren't met. +type PathTransformationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformationValidationError) ErrorName() string { + return "PathTransformationValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformationValidationError{} + +// Validate checks the field values on PathTransformation_Operation with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathTransformation_Operation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PathTransformation_OperationMultiError, or nil if none found. +func (m *PathTransformation_Operation) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOperationSpecifierPresent := false + switch v := m.OperationSpecifier.(type) { + case *PathTransformation_Operation_NormalizePathRfc_3986: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true + + if all { + switch v := interface{}(m.GetNormalizePathRfc_3986()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNormalizePathRfc_3986()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PathTransformation_Operation_MergeSlashes_: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMergeSlashes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMergeSlashes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOperationSpecifierPresent { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PathTransformation_OperationMultiError(errors) + } + + return nil +} + +// 
PathTransformation_OperationMultiError is an error wrapping multiple +// validation errors returned by PathTransformation_Operation.ValidateAll() if +// the designated constraints aren't met. +type PathTransformation_OperationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_OperationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_OperationMultiError) AllErrors() []error { return m } + +// PathTransformation_OperationValidationError is the validation error returned +// by PathTransformation_Operation.Validate if the designated constraints +// aren't met. +type PathTransformation_OperationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_OperationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformation_OperationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformation_OperationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformation_OperationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_OperationValidationError) ErrorName() string { + return "PathTransformation_OperationValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_OperationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_OperationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_OperationValidationError{} + +// Validate checks the field values on +// PathTransformation_Operation_NormalizePathRFC3986 with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation_NormalizePathRFC3986) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// PathTransformation_Operation_NormalizePathRFC3986 with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// PathTransformation_Operation_NormalizePathRFC3986MultiError, or nil if none found. 
+func (m *PathTransformation_Operation_NormalizePathRFC3986) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return PathTransformation_Operation_NormalizePathRFC3986MultiError(errors) + } + + return nil +} + +// PathTransformation_Operation_NormalizePathRFC3986MultiError is an error +// wrapping multiple validation errors returned by +// PathTransformation_Operation_NormalizePathRFC3986.ValidateAll() if the +// designated constraints aren't met. +type PathTransformation_Operation_NormalizePathRFC3986MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_Operation_NormalizePathRFC3986MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_Operation_NormalizePathRFC3986MultiError) AllErrors() []error { return m } + +// PathTransformation_Operation_NormalizePathRFC3986ValidationError is the +// validation error returned by +// PathTransformation_Operation_NormalizePathRFC3986.Validate if the +// designated constraints aren't met. +type PathTransformation_Operation_NormalizePathRFC3986ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) ErrorName() string { + return "PathTransformation_Operation_NormalizePathRFC3986ValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation_NormalizePathRFC3986.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_Operation_NormalizePathRFC3986ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_Operation_NormalizePathRFC3986ValidationError{} + +// Validate checks the field values on +// PathTransformation_Operation_MergeSlashes with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation_MergeSlashes) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// PathTransformation_Operation_MergeSlashes with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in +// PathTransformation_Operation_MergeSlashesMultiError, or nil if none found. +func (m *PathTransformation_Operation_MergeSlashes) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation_MergeSlashes) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return PathTransformation_Operation_MergeSlashesMultiError(errors) + } + + return nil +} + +// PathTransformation_Operation_MergeSlashesMultiError is an error wrapping +// multiple validation errors returned by +// PathTransformation_Operation_MergeSlashes.ValidateAll() if the designated +// constraints aren't met. +type PathTransformation_Operation_MergeSlashesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_Operation_MergeSlashesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_Operation_MergeSlashesMultiError) AllErrors() []error { return m } + +// PathTransformation_Operation_MergeSlashesValidationError is the validation +// error returned by PathTransformation_Operation_MergeSlashes.Validate if the +// designated constraints aren't met. +type PathTransformation_Operation_MergeSlashesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_Operation_MergeSlashesValidationError) ErrorName() string { + return "PathTransformation_Operation_MergeSlashesValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_Operation_MergeSlashesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation_MergeSlashes.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_Operation_MergeSlashesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_Operation_MergeSlashesValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go new file mode 100644 index 000000000..64d8960cf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go @@ -0,0 +1,300 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_MergeSlashes_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_NormalizePathRfc_3986); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NormalizePathRfc_3986 != nil { + size, err := m.NormalizePathRfc_3986.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MergeSlashes != nil { + size, err := m.MergeSlashes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PathTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Operations[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_MergeSlashes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OperationSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NormalizePathRfc_3986 != nil { + l = m.NormalizePathRfc_3986.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation_Operation_MergeSlashes_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MergeSlashes != nil { + l = m.MergeSlashes.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, e := range m.Operations { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + 
return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go new file mode 100644 index 000000000..db3bd5994 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// FilterStateMatcher provides a general interface for matching the filter state objects. +type FilterStateMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter state key to retrieve the object. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Types that are assignable to Matcher: + // + // *FilterStateMatcher_StringMatch + Matcher isFilterStateMatcher_Matcher `protobuf_oneof:"matcher"` +} + +func (x *FilterStateMatcher) Reset() { + *x = FilterStateMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterStateMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterStateMatcher) ProtoMessage() {} + +func (x *FilterStateMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterStateMatcher.ProtoReflect.Descriptor instead. +func (*FilterStateMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP(), []int{0} +} + +func (x *FilterStateMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (m *FilterStateMatcher) GetMatcher() isFilterStateMatcher_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *FilterStateMatcher) GetStringMatch() *StringMatcher { + if x, ok := x.GetMatcher().(*FilterStateMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +type isFilterStateMatcher_Matcher interface { + isFilterStateMatcher_Matcher() +} + +type FilterStateMatcher_StringMatch struct { + // Matches the filter state object as a string value. 
+ StringMatch *StringMatcher `protobuf:"bytes,2,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +func (*FilterStateMatcher_StringMatch) isFilterStateMatcher_Matcher() {} + +var File_envoy_type_matcher_v3_filter_state_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_filter_state_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x01, + 0x0a, 0x12, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x89, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = file_envoy_type_matcher_v3_filter_state_proto_rawDesc +) + +func file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_filter_state_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_filter_state_proto_rawDescData +} + +var file_envoy_type_matcher_v3_filter_state_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_filter_state_proto_goTypes = []interface{}{ + (*FilterStateMatcher)(nil), // 0: envoy.type.matcher.v3.FilterStateMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_filter_state_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.FilterStateMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_filter_state_proto_init() } +func file_envoy_type_matcher_v3_filter_state_proto_init() { + if File_envoy_type_matcher_v3_filter_state_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterStateMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FilterStateMatcher_StringMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_filter_state_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_filter_state_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_filter_state_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_filter_state_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_filter_state_proto = out.File + file_envoy_type_matcher_v3_filter_state_proto_rawDesc = nil + file_envoy_type_matcher_v3_filter_state_proto_goTypes = nil + file_envoy_type_matcher_v3_filter_state_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go new file mode 100644 index 000000000..41a5f68db --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FilterStateMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterStateMatcherMultiError, or nil if none found. +func (m *FilterStateMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterStateMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := FilterStateMatcherValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *FilterStateMatcher_StringMatch: + if v == nil { + err := FilterStateMatcherValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := FilterStateMatcherValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FilterStateMatcherMultiError(errors) + } + + return nil +} + +// FilterStateMatcherMultiError is an error wrapping multiple validation errors +// returned by FilterStateMatcher.ValidateAll() if the designated constraints +// aren't met. +type FilterStateMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterStateMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterStateMatcherMultiError) AllErrors() []error { return m } + +// FilterStateMatcherValidationError is the validation error returned by +// FilterStateMatcher.Validate if the designated constraints aren't met. +type FilterStateMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterStateMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterStateMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e FilterStateMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterStateMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterStateMatcherValidationError) ErrorName() string { + return "FilterStateMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e FilterStateMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterStateMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterStateMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterStateMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go new file mode 100644 index 000000000..873f63eef --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go @@ -0,0 +1,121 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FilterStateMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterStateMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*FilterStateMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterStateMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *FilterStateMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *FilterStateMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go new file mode 100644 index 000000000..a2f9c73ad --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go @@ -0,0 +1,452 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Match input indicates that matching should be done on a specific request header. 
+// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_headers] +type HttpRequestHeaderMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request header to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpRequestHeaderMatchInput) Reset() { + *x = HttpRequestHeaderMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestHeaderMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestHeaderMatchInput) ProtoMessage() {} + +func (x *HttpRequestHeaderMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestHeaderMatchInput.ProtoReflect.Descriptor instead. +func (*HttpRequestHeaderMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpRequestHeaderMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific request trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_trailers] +type HttpRequestTrailerMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request trailer to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpRequestTrailerMatchInput) Reset() { + *x = HttpRequestTrailerMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestTrailerMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestTrailerMatchInput) ProtoMessage() {} + +func (x *HttpRequestTrailerMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestTrailerMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpRequestTrailerMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRequestTrailerMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicating that matching should be done on a specific response header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_headers] +type HttpResponseHeaderMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response header to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpResponseHeaderMatchInput) Reset() { + *x = HttpResponseHeaderMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseHeaderMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseHeaderMatchInput) ProtoMessage() {} + +func (x *HttpResponseHeaderMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseHeaderMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseHeaderMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpResponseHeaderMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific response trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_trailers] +type HttpResponseTrailerMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response trailer to match on. 
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpResponseTrailerMatchInput) Reset() { + *x = HttpResponseTrailerMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseTrailerMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseTrailerMatchInput) ProtoMessage() {} + +func (x *HttpResponseTrailerMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseTrailerMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseTrailerMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpResponseTrailerMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific query parameter. +// The resulting input string will be the first query parameter for the value +// 'query_param'. +// [#extension: envoy.matching.inputs.query_params] +type HttpRequestQueryParamMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The query parameter to match on. + QueryParam string `protobuf:"bytes,1,opt,name=query_param,json=queryParam,proto3" json:"query_param,omitempty"` +} + +func (x *HttpRequestQueryParamMatchInput) Reset() { + *x = HttpRequestQueryParamMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestQueryParamMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestQueryParamMatchInput) ProtoMessage() {} + +func (x *HttpRequestQueryParamMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestQueryParamMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpRequestQueryParamMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{4} +} + +func (x *HttpRequestQueryParamMatchInput) GetQueryParam() string { + if x != nil { + return x.QueryParam + } + return "" +} + +var File_envoy_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x1b, 0x48, 0x74, 0x74, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x22, 0x4d, 0x0a, 0x1d, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x4b, 0x0a, 0x1f, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x42, 
0x88, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = file_envoy_type_matcher_v3_http_inputs_proto_rawDesc +) + +func file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_http_inputs_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescData +} + +var file_envoy_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{ + (*HttpRequestHeaderMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpRequestHeaderMatchInput + (*HttpRequestTrailerMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpRequestTrailerMatchInput + (*HttpResponseHeaderMatchInput)(nil), // 2: envoy.type.matcher.v3.HttpResponseHeaderMatchInput + (*HttpResponseTrailerMatchInput)(nil), // 3: envoy.type.matcher.v3.HttpResponseTrailerMatchInput + (*HttpRequestQueryParamMatchInput)(nil), // 4: envoy.type.matcher.v3.HttpRequestQueryParamMatchInput +} +var file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_http_inputs_proto_init() } +func file_envoy_type_matcher_v3_http_inputs_proto_init() { + if File_envoy_type_matcher_v3_http_inputs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestHeaderMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestTrailerMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseHeaderMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseTrailerMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestQueryParamMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_http_inputs_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_http_inputs_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_http_inputs_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_http_inputs_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_http_inputs_proto = out.File + file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = nil + file_envoy_type_matcher_v3_http_inputs_proto_goTypes = nil + file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go new file mode 100644 index 000000000..78de165bd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go @@ -0,0 +1,615 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpRequestHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestHeaderMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpRequestHeaderMatchInputMultiError, or nil if none found. 
+func (m *HttpRequestHeaderMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestHeaderMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpRequestHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpRequestHeaderMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestHeaderMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestHeaderMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestHeaderMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpRequestHeaderMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestHeaderMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestHeaderMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestHeaderMatchInputValidationError is the validation error returned +// by HttpRequestHeaderMatchInput.Validate if the designated constraints +// aren't met. +type HttpRequestHeaderMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestHeaderMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestHeaderMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestHeaderMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestHeaderMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpRequestHeaderMatchInputValidationError) ErrorName() string { + return "HttpRequestHeaderMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestHeaderMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestHeaderMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestHeaderMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestHeaderMatchInputValidationError{} + +var _HttpRequestHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpRequestTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestTrailerMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpRequestTrailerMatchInputMultiError, or nil if none found. 
+func (m *HttpRequestTrailerMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestTrailerMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpRequestTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpRequestTrailerMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestTrailerMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestTrailerMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestTrailerMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpRequestTrailerMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestTrailerMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestTrailerMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestTrailerMatchInputValidationError is the validation error returned +// by HttpRequestTrailerMatchInput.Validate if the designated constraints +// aren't met. +type HttpRequestTrailerMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestTrailerMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestTrailerMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestTrailerMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestTrailerMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpRequestTrailerMatchInputValidationError) ErrorName() string { + return "HttpRequestTrailerMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestTrailerMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestTrailerMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestTrailerMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestTrailerMatchInputValidationError{} + +var _HttpRequestTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpResponseHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpResponseHeaderMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseHeaderMatchInput with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpResponseHeaderMatchInputMultiError, or nil if none found. +func (m *HttpResponseHeaderMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseHeaderMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpResponseHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpResponseHeaderMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpResponseHeaderMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseHeaderMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpResponseHeaderMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpResponseHeaderMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseHeaderMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseHeaderMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseHeaderMatchInputValidationError is the validation error returned +// by HttpResponseHeaderMatchInput.Validate if the designated constraints +// aren't met. +type HttpResponseHeaderMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseHeaderMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseHeaderMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseHeaderMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseHeaderMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseHeaderMatchInputValidationError) ErrorName() string { + return "HttpResponseHeaderMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseHeaderMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseHeaderMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseHeaderMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseHeaderMatchInputValidationError{} + +var _HttpResponseHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpResponseTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpResponseTrailerMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseTrailerMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpResponseTrailerMatchInputMultiError, or nil if none found. +func (m *HttpResponseTrailerMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseTrailerMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpResponseTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpResponseTrailerMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpResponseTrailerMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseTrailerMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpResponseTrailerMatchInput.ValidateAll() +// if the designated constraints aren't met. +type HttpResponseTrailerMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseTrailerMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseTrailerMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseTrailerMatchInputValidationError is the validation error +// returned by HttpResponseTrailerMatchInput.Validate if the designated +// constraints aren't met. +type HttpResponseTrailerMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseTrailerMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseTrailerMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseTrailerMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseTrailerMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseTrailerMatchInputValidationError) ErrorName() string { + return "HttpResponseTrailerMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseTrailerMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseTrailerMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseTrailerMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseTrailerMatchInputValidationError{} + +var _HttpResponseTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpRequestQueryParamMatchInput with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestQueryParamMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestQueryParamMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpRequestQueryParamMatchInputMultiError, or nil if none found. +func (m *HttpRequestQueryParamMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestQueryParamMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetQueryParam()) < 1 { + err := HttpRequestQueryParamMatchInputValidationError{ + field: "QueryParam", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestQueryParamMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestQueryParamMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestQueryParamMatchInput.ValidateAll() +// if the designated constraints aren't met. +type HttpRequestQueryParamMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestQueryParamMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestQueryParamMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestQueryParamMatchInputValidationError is the validation error +// returned by HttpRequestQueryParamMatchInput.Validate if the designated +// constraints aren't met. +type HttpRequestQueryParamMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestQueryParamMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestQueryParamMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestQueryParamMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestQueryParamMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpRequestQueryParamMatchInputValidationError) ErrorName() string { + return "HttpRequestQueryParamMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestQueryParamMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestQueryParamMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestQueryParamMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestQueryParamMatchInputValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go new file mode 100644 index 000000000..ecf552dc0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go @@ -0,0 +1,289 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpRequestHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.QueryParam) > 0 { + i -= len(m.QueryParam) + copy(dAtA[i:], m.QueryParam) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.QueryParam))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestQueryParamMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QueryParam) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go new file mode 100644 index 000000000..14a093334 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go @@ -0,0 +1,303 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-major-version: MetadataMatcher should use StructMatcher] +type MetadataMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter name to retrieve the Struct from the Metadata. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The path to retrieve the Value from the Struct. + Path []*MetadataMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. + Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + // If true, the match result will be inverted. + Invert bool `protobuf:"varint,4,opt,name=invert,proto3" json:"invert,omitempty"` +} + +func (x *MetadataMatcher) Reset() { + *x = MetadataMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataMatcher) ProtoMessage() {} + +func (x *MetadataMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataMatcher.ProtoReflect.Descriptor instead. 
+func (*MetadataMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *MetadataMatcher) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *MetadataMatcher) GetPath() []*MetadataMatcher_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +func (x *MetadataMatcher) GetValue() *ValueMatcher { + if x != nil { + return x.Value + } + return nil +} + +func (x *MetadataMatcher) GetInvert() bool { + if x != nil { + return x.Invert + } + return false +} + +// Specifies the segment in a path to retrieve value from Metadata. +// Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that +// if the segment key refers to a list, it has to be the last segment in a path. +type MetadataMatcher_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *MetadataMatcher_PathSegment_Key + Segment isMetadataMatcher_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *MetadataMatcher_PathSegment) Reset() { + *x = MetadataMatcher_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataMatcher_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataMatcher_PathSegment) ProtoMessage() {} + +func (x *MetadataMatcher_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataMatcher_PathSegment.ProtoReflect.Descriptor instead. +func (*MetadataMatcher_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *MetadataMatcher_PathSegment) GetSegment() isMetadataMatcher_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *MetadataMatcher_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*MetadataMatcher_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isMetadataMatcher_PathSegment_Segment interface { + isMetadataMatcher_PathSegment_Segment() +} + +type MetadataMatcher_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*MetadataMatcher_PathSegment_Key) isMetadataMatcher_PathSegment_Segment() {} + +var File_envoy_type_matcher_v3_metadata_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_metadata_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xff, 0x02, 0x0a, 0x0f, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x50, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x76, 0x65, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x1a, + 0x71, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, + 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x3a, 0x29, 
0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x86, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_metadata_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_metadata_proto_rawDescData = file_envoy_type_matcher_v3_metadata_proto_rawDesc +) + +func file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_metadata_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_metadata_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_metadata_proto_rawDescData +} + +var file_envoy_type_matcher_v3_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_metadata_proto_goTypes = []interface{}{ + (*MetadataMatcher)(nil), // 0: envoy.type.matcher.v3.MetadataMatcher + (*MetadataMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.MetadataMatcher.PathSegment + (*ValueMatcher)(nil), // 2: envoy.type.matcher.v3.ValueMatcher +} +var file_envoy_type_matcher_v3_metadata_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.MetadataMatcher.path:type_name -> envoy.type.matcher.v3.MetadataMatcher.PathSegment + 2, // 1: envoy.type.matcher.v3.MetadataMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_metadata_proto_init() } +func file_envoy_type_matcher_v3_metadata_proto_init() { + if File_envoy_type_matcher_v3_metadata_proto != nil { + return + } + file_envoy_type_matcher_v3_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataMatcher_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].OneofWrappers = 
[]interface{}{ + (*MetadataMatcher_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_metadata_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_metadata_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_metadata_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_metadata_proto = out.File + file_envoy_type_matcher_v3_metadata_proto_rawDesc = nil + file_envoy_type_matcher_v3_metadata_proto_goTypes = nil + file_envoy_type_matcher_v3_metadata_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go new file mode 100644 index 000000000..27c898ee0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go @@ -0,0 +1,378 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MetadataMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MetadataMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataMatcherMultiError, or nil if none found. 
+func (m *MetadataMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetFilter()) < 1 { + err := MetadataMatcherValidationError{ + field: "Filter", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetPath()) < 1 { + err := MetadataMatcherValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetValue() == nil { + err := MetadataMatcherValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Invert + + if len(errors) > 0 { + return MetadataMatcherMultiError(errors) + } + + return nil +} + +// MetadataMatcherMultiError is an error wrapping multiple validation errors +// returned by MetadataMatcher.ValidateAll() if the designated constraints +// aren't met. +type MetadataMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMatcherMultiError) AllErrors() []error { return m } + +// MetadataMatcherValidationError is the validation error returned by +// MetadataMatcher.Validate if the designated constraints aren't met. +type MetadataMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e MetadataMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataMatcherValidationError) ErrorName() string { return "MetadataMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataMatcherValidationError{} + +// Validate checks the field values on MetadataMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataMatcher_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataMatcher_PathSegmentMultiError, or nil if none found. +func (m *MetadataMatcher_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataMatcher_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *MetadataMatcher_PathSegment_Key: + if v == nil { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataMatcher_PathSegmentMultiError(errors) + } + + return nil +} + +// MetadataMatcher_PathSegmentMultiError is an error wrapping multiple +// validation errors returned by MetadataMatcher_PathSegment.ValidateAll() if +// the designated constraints aren't met. +type MetadataMatcher_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataMatcher_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m MetadataMatcher_PathSegmentMultiError) AllErrors() []error { return m } + +// MetadataMatcher_PathSegmentValidationError is the validation error returned +// by MetadataMatcher_PathSegment.Validate if the designated constraints +// aren't met. +type MetadataMatcher_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataMatcher_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataMatcher_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataMatcher_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataMatcher_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataMatcher_PathSegmentValidationError) ErrorName() string { + return "MetadataMatcher_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataMatcher_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataMatcher_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataMatcher_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataMatcher_PathSegmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go new file mode 100644 index 000000000..4050e14c2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go @@ -0,0 +1,195 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Invert { + i-- + if m.Invert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Filter) > 0 { + i -= len(m.Filter) + copy(dAtA[i:], m.Filter) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Invert { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go new file mode 100644 index 000000000..d6083cb27 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a Node. +// The match follows AND semantics. +type NodeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies match criteria on the node id. + NodeId *StringMatcher `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Specifies match criteria on the node metadata. + NodeMetadatas []*StructMatcher `protobuf:"bytes,2,rep,name=node_metadatas,json=nodeMetadatas,proto3" json:"node_metadatas,omitempty"` +} + +func (x *NodeMatcher) Reset() { + *x = NodeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeMatcher) ProtoMessage() {} + +func (x *NodeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeMatcher.ProtoReflect.Descriptor instead. 
+func (*NodeMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_node_proto_rawDescGZIP(), []int{0} +} + +func (x *NodeMatcher) GetNodeId() *StringMatcher { + if x != nil { + return x.NodeId + } + return nil +} + +func (x *NodeMatcher) GetNodeMetadatas() []*StructMatcher { + if x != nil { + return x.NodeMetadatas + } + return nil +} + +var File_envoy_type_matcher_v3_node_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_node_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x73, 0x3a, + 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 
0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_node_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_node_proto_rawDescData = file_envoy_type_matcher_v3_node_proto_rawDesc +) + +func file_envoy_type_matcher_v3_node_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_node_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_node_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_node_proto_rawDescData +} + +var file_envoy_type_matcher_v3_node_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_node_proto_goTypes = []interface{}{ + (*NodeMatcher)(nil), // 0: envoy.type.matcher.v3.NodeMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher + (*StructMatcher)(nil), // 2: envoy.type.matcher.v3.StructMatcher +} +var file_envoy_type_matcher_v3_node_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.NodeMatcher.node_id:type_name -> envoy.type.matcher.v3.StringMatcher + 2, // 1: envoy.type.matcher.v3.NodeMatcher.node_metadatas:type_name -> envoy.type.matcher.v3.StructMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_node_proto_init() } +func file_envoy_type_matcher_v3_node_proto_init() { + if File_envoy_type_matcher_v3_node_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + file_envoy_type_matcher_v3_struct_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_node_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_node_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_node_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_node_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_node_proto = out.File + file_envoy_type_matcher_v3_node_proto_rawDesc = nil + file_envoy_type_matcher_v3_node_proto_goTypes = nil + file_envoy_type_matcher_v3_node_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go new file mode 100644 index 000000000..62aa27f7a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go @@ -0,0 +1,199 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on NodeMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *NodeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NodeMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in NodeMatcherMultiError, or +// nil if none found. +func (m *NodeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *NodeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNodeId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNodeId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetNodeMetadatas() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return NodeMatcherMultiError(errors) + } + + return nil +} + +// NodeMatcherMultiError is an error wrapping multiple validation errors +// returned by NodeMatcher.ValidateAll() if the designated constraints aren't met. +type NodeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m NodeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NodeMatcherMultiError) AllErrors() []error { return m } + +// NodeMatcherValidationError is the validation error returned by +// NodeMatcher.Validate if the designated constraints aren't met. +type NodeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NodeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NodeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NodeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NodeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NodeMatcherValidationError) ErrorName() string { return "NodeMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e NodeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNodeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NodeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NodeMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go new file mode 100644 index 000000000..3bea65da7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *NodeMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NodeMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NodeMetadatas) > 0 { + for iNdEx := len(m.NodeMetadatas) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NodeMetadatas[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.NodeId != nil { + size, err := m.NodeId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NodeMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeId != nil { + l = m.NodeId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NodeMetadatas) > 0 { + for _, e := range m.NodeMetadatas { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go new file mode 100644 index 000000000..2ad4bccfa --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a double value. 
+type DoubleMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *DoubleMatcher_Range + // *DoubleMatcher_Exact + MatchPattern isDoubleMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *DoubleMatcher) Reset() { + *x = DoubleMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleMatcher) ProtoMessage() {} + +func (x *DoubleMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleMatcher.ProtoReflect.Descriptor instead. +func (*DoubleMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_number_proto_rawDescGZIP(), []int{0} +} + +func (m *DoubleMatcher) GetMatchPattern() isDoubleMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *DoubleMatcher) GetRange() *v3.DoubleRange { + if x, ok := x.GetMatchPattern().(*DoubleMatcher_Range); ok { + return x.Range + } + return nil +} + +func (x *DoubleMatcher) GetExact() float64 { + if x, ok := x.GetMatchPattern().(*DoubleMatcher_Exact); ok { + return x.Exact + } + return 0 +} + +type isDoubleMatcher_MatchPattern interface { + isDoubleMatcher_MatchPattern() +} + +type DoubleMatcher_Range struct { + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + Range *v3.DoubleRange `protobuf:"bytes,1,opt,name=range,proto3,oneof"` +} + +type DoubleMatcher_Exact struct { + // If specified, the input double value must be equal to the value specified here. 
+ Exact float64 `protobuf:"fixed64,2,opt,name=exact,proto3,oneof"` +} + +func (*DoubleMatcher_Range) isDoubleMatcher_MatchPattern() {} + +func (*DoubleMatcher_Exact) isDoubleMatcher_MatchPattern() {} + +var File_envoy_type_matcher_v3_number_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_number_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x3a, + 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x84, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_number_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_number_proto_rawDescData = file_envoy_type_matcher_v3_number_proto_rawDesc +) + +func file_envoy_type_matcher_v3_number_proto_rawDescGZIP() []byte { + 
file_envoy_type_matcher_v3_number_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_number_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_number_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_number_proto_rawDescData +} + +var file_envoy_type_matcher_v3_number_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_number_proto_goTypes = []interface{}{ + (*DoubleMatcher)(nil), // 0: envoy.type.matcher.v3.DoubleMatcher + (*v3.DoubleRange)(nil), // 1: envoy.type.v3.DoubleRange +} +var file_envoy_type_matcher_v3_number_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.DoubleMatcher.range:type_name -> envoy.type.v3.DoubleRange + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_number_proto_init() } +func file_envoy_type_matcher_v3_number_proto_init() { + if File_envoy_type_matcher_v3_number_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_number_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_number_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*DoubleMatcher_Range)(nil), + (*DoubleMatcher_Exact)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_number_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_number_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_number_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_number_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_number_proto = out.File + file_envoy_type_matcher_v3_number_proto_rawDesc = nil + file_envoy_type_matcher_v3_number_proto_goTypes = nil + file_envoy_type_matcher_v3_number_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go new file mode 100644 index 000000000..b019d7d01 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DoubleMatcher with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleMatcherMultiError, or +// nil if none found. +func (m *DoubleMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *DoubleMatcher_Range: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRange()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DoubleMatcher_Exact: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DoubleMatcherMultiError(errors) + } + + return nil +} + +// DoubleMatcherMultiError is an error wrapping multiple validation errors +// returned by DoubleMatcher.ValidateAll() if the designated constraints +// aren't met. +type DoubleMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleMatcherMultiError) AllErrors() []error { return m } + +// DoubleMatcherValidationError is the validation error returned by +// DoubleMatcher.Validate if the designated constraints aren't met. +type DoubleMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e DoubleMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleMatcherValidationError) ErrorName() string { return "DoubleMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go new file mode 100644 index 000000000..7315258ab --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go @@ -0,0 +1,160 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DoubleMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Range); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DoubleMatcher_Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Range != nil { + if vtmsg, ok := interface{}(m.Range).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Range) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DoubleMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Exact)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *DoubleMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DoubleMatcher_Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Range != nil { + if size, ok := interface{}(m.Range).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Range) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DoubleMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go new file mode 100644 index 000000000..aac680dbe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a path on HTTP request. +type PathMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *PathMatcher_Path + Rule isPathMatcher_Rule `protobuf_oneof:"rule"` +} + +func (x *PathMatcher) Reset() { + *x = PathMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathMatcher) ProtoMessage() {} + +func (x *PathMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathMatcher.ProtoReflect.Descriptor instead. +func (*PathMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_path_proto_rawDescGZIP(), []int{0} +} + +func (m *PathMatcher) GetRule() isPathMatcher_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *PathMatcher) GetPath() *StringMatcher { + if x, ok := x.GetRule().(*PathMatcher_Path); ok { + return x.Path + } + return nil +} + +type isPathMatcher_Rule interface { + isPathMatcher_Rule() +} + +type PathMatcher_Path struct { + // The “path“ must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path “/data“ will match the “:path“ header “/data#fragment?param=value“. 
+ Path *StringMatcher `protobuf:"bytes,1,opt,name=path,proto3,oneof"` +} + +func (*PathMatcher_Path) isPathMatcher_Rule() {} + +var File_envoy_type_matcher_v3_path_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_path_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0b, 0x50, 0x61, 0x74, + 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x09, 0x50, 0x61, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_path_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_path_proto_rawDescData = file_envoy_type_matcher_v3_path_proto_rawDesc +) + +func file_envoy_type_matcher_v3_path_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_path_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_path_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_path_proto_rawDescData) + }) + return 
file_envoy_type_matcher_v3_path_proto_rawDescData +} + +var file_envoy_type_matcher_v3_path_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_path_proto_goTypes = []interface{}{ + (*PathMatcher)(nil), // 0: envoy.type.matcher.v3.PathMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_path_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.PathMatcher.path:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_path_proto_init() } +func file_envoy_type_matcher_v3_path_proto_init() { + if File_envoy_type_matcher_v3_path_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_path_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_path_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*PathMatcher_Path)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_path_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_path_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_path_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_path_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_path_proto = out.File + file_envoy_type_matcher_v3_path_proto_rawDesc = nil + file_envoy_type_matcher_v3_path_proto_goTypes = nil + file_envoy_type_matcher_v3_path_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go new file mode 100644 index 000000000..a978c99ab --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go @@ -0,0 +1,205 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PathMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathMatcher with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in PathMatcherMultiError, or +// nil if none found. +func (m *PathMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *PathMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *PathMatcher_Path: + if v == nil { + err := PathMatcherValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetPath() == nil { + err := PathMatcherValidationError{ + field: "Path", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := PathMatcherValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PathMatcherMultiError(errors) + } + + return nil +} + +// PathMatcherMultiError is an error wrapping multiple validation errors +// returned by PathMatcher.ValidateAll() if the designated constraints aren't met. +type PathMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathMatcherMultiError) AllErrors() []error { return m } + +// PathMatcherValidationError is the validation error returned by +// PathMatcher.Validate if the designated constraints aren't met. +type PathMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PathMatcherValidationError) ErrorName() string { return "PathMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e PathMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go new file mode 100644 index 000000000..044fe9db2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*PathMatcher_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathMatcher_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Path != nil { + size, err := m.Path.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *PathMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathMatcher_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go new file mode 100644 index 000000000..383bb267c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go @@ -0,0 +1,415 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A regex matcher designed for safety when used with untrusted input. +type RegexMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to EngineType: + // + // *RegexMatcher_GoogleRe2 + EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"` + // The regex match string. The string must be supported by the configured engine. The regex is matched + // against the full string, not as a partial match. + Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"` +} + +func (x *RegexMatcher) Reset() { + *x = RegexMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher) ProtoMessage() {} + +func (x *RegexMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead. +func (*RegexMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0} +} + +func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType { + if m != nil { + return m.EngineType + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. +func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 { + if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok { + return x.GoogleRe2 + } + return nil +} + +func (x *RegexMatcher) GetRegex() string { + if x != nil { + return x.Regex + } + return "" +} + +type isRegexMatcher_EngineType interface { + isRegexMatcher_EngineType() +} + +type RegexMatcher_GoogleRe2 struct { + // Google's RE2 regex engine. + // + // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. 
+ GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"` +} + +func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +type RegexMatchAndSubstitute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + Pattern *RegexMatcher `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., “\1“ refers to capture group 1, and “\2“ refers + // to capture group 2. + Substitution string `protobuf:"bytes,2,opt,name=substitution,proto3" json:"substitution,omitempty"` +} + +func (x *RegexMatchAndSubstitute) Reset() { + *x = RegexMatchAndSubstitute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatchAndSubstitute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatchAndSubstitute) ProtoMessage() {} + +func (x *RegexMatchAndSubstitute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatchAndSubstitute.ProtoReflect.Descriptor instead. +func (*RegexMatchAndSubstitute) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{1} +} + +func (x *RegexMatchAndSubstitute) GetPattern() *RegexMatcher { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *RegexMatchAndSubstitute) GetSubstitution() string { + if x != nil { + return x.Substitution + } + return "" +} + +// Google's `RE2 `_ regex engine. The regex string must adhere to +// the documented `syntax `_. The engine is designed +// to complete execution in linear time as well as limit the amount of memory used. +// +// Envoy supports program size checking via runtime. 
The runtime keys “re2.max_program_size.error_level“ +// and “re2.max_program_size.warn_level“ can be set to integers as the maximum program size or +// complexity that a compiled regex can have before an exception is thrown or a warning is +// logged, respectively. “re2.max_program_size.error_level“ defaults to 100, and +// “re2.max_program_size.warn_level“ has no default if unset (will not check/log a warning). +// +// Envoy emits two stats for tracking the program size of regexes: the histogram “re2.program_size“, +// which records the program size, and the counter “re2.exceeded_warn_level“, which is incremented +// each time the program size exceeds the warn level threshold. +type RegexMatcher_GoogleRE2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field controls the RE2 "program size" which is a rough estimate of how complex a + // compiled regex is to evaluate. A regex that has a program size greater than the configured + // value will fail to compile. In this case, the configured max program size can be increased + // or the regex can be simplified. If not specified, the default is 100. + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + // + // .. note:: + // + // Although this field is deprecated, the program size will still be checked against the + // global ``re2.max_program_size.error_level`` runtime value. + // + // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. + MaxProgramSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"` +} + +func (x *RegexMatcher_GoogleRE2) Reset() { + *x = RegexMatcher_GoogleRE2{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher_GoogleRE2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher_GoogleRE2) ProtoMessage() {} + +func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead. +func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0} +} + +// Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. 
+func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxProgramSize + } + return nil +} + +var File_envoy_type_matcher_v3_regex_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_regex_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x02, 0x0a, 0x0c, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x0b, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x92, 0x01, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x52, 0x45, 0x32, 0x12, 0x53, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, + 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x3a, 0x26, 0x9a, 0xc5, + 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 
0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x12, + 0x47, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x2f, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, + 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x42, 0x83, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, + 0x67, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_regex_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_regex_proto_rawDescData = file_envoy_type_matcher_v3_regex_proto_rawDesc +) + +func file_envoy_type_matcher_v3_regex_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_regex_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_regex_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_regex_proto_rawDescData +} + +var file_envoy_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_matcher_v3_regex_proto_goTypes = []interface{}{ + (*RegexMatcher)(nil), // 0: envoy.type.matcher.v3.RegexMatcher + (*RegexMatchAndSubstitute)(nil), // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute + (*RegexMatcher_GoogleRE2)(nil), // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2 + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value +} +var file_envoy_type_matcher_v3_regex_proto_depIdxs = []int32{ + 2, // 0: envoy.type.matcher.v3.RegexMatcher.google_re2:type_name -> envoy.type.matcher.v3.RegexMatcher.GoogleRE2 + 0, // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute.pattern:type_name -> envoy.type.matcher.v3.RegexMatcher + 
3, // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2.max_program_size:type_name -> google.protobuf.UInt32Value + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_regex_proto_init() } +func file_envoy_type_matcher_v3_regex_proto_init() { + if File_envoy_type_matcher_v3_regex_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatchAndSubstitute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher_GoogleRE2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RegexMatcher_GoogleRe2)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_regex_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_regex_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_regex_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_regex_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_regex_proto = out.File + file_envoy_type_matcher_v3_regex_proto_rawDesc = nil + file_envoy_type_matcher_v3_regex_proto_goTypes = nil + file_envoy_type_matcher_v3_regex_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go new file mode 100644 index 000000000..bb00d0cd7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go @@ -0,0 +1,479 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RegexMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *RegexMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RegexMatcherMultiError, or +// nil if none found. +func (m *RegexMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRegex()) < 1 { + err := RegexMatcherValidationError{ + field: "Regex", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.EngineType.(type) { + case *RegexMatcher_GoogleRe2: + if v == nil { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGoogleRe2()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RegexMatcherMultiError(errors) + } + + return nil +} + +// RegexMatcherMultiError is an error wrapping multiple validation errors +// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met. +type RegexMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcherMultiError) AllErrors() []error { return m } + +// RegexMatcherValidationError is the validation error returned by +// RegexMatcher.Validate if the designated constraints aren't met. +type RegexMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e RegexMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcherValidationError{} + +// Validate checks the field values on RegexMatchAndSubstitute with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatchAndSubstitute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatchAndSubstitute with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatchAndSubstituteMultiError, or nil if none found. +func (m *RegexMatchAndSubstitute) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatchAndSubstitute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPattern() == nil { + err := RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPattern()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPattern()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_RegexMatchAndSubstitute_Substitution_Pattern.MatchString(m.GetSubstitution()) { + err := RegexMatchAndSubstituteValidationError{ + field: "Substitution", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RegexMatchAndSubstituteMultiError(errors) + } + + return nil +} + +// RegexMatchAndSubstituteMultiError is an error wrapping multiple validation +// errors returned by RegexMatchAndSubstitute.ValidateAll() if the designated +// constraints aren't met. +type RegexMatchAndSubstituteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatchAndSubstituteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
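Editor's note (illustrative only): the validation code above requires RegexMatchAndSubstitute.pattern to be set and its substitution to contain no NUL, CR, or LF characters. A minimal sketch of a message that satisfies those checks, with arbitrary example values:

```go
// Illustrative sketch only: a capture-group rewrite of a path prefix that
// passes the RegexMatchAndSubstitute validation rules shown above.
package main

import (
	"fmt"

	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

func main() {
	rewrite := &matcherv3.RegexMatchAndSubstitute{
		Pattern: &matcherv3.RegexMatcher{
			EngineType: &matcherv3.RegexMatcher_GoogleRe2{
				GoogleRe2: &matcherv3.RegexMatcher_GoogleRE2{},
			},
			Regex: "^/service/([^/]+)(/.*)$",
		},
		// Substitution may reference capture groups; it must not contain
		// NUL, CR, or LF, per the pattern checked above.
		Substitution: "/api/\\1\\2",
	}
	// ValidateAll collects every violation instead of returning the first one.
	if err := rewrite.ValidateAll(); err != nil {
		fmt.Println("invalid rewrite:", err)
		return
	}
	fmt.Println("rewrite ok")
}
```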
+func (m RegexMatchAndSubstituteMultiError) AllErrors() []error { return m } + +// RegexMatchAndSubstituteValidationError is the validation error returned by +// RegexMatchAndSubstitute.Validate if the designated constraints aren't met. +type RegexMatchAndSubstituteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatchAndSubstituteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatchAndSubstituteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatchAndSubstituteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatchAndSubstituteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RegexMatchAndSubstituteValidationError) ErrorName() string { + return "RegexMatchAndSubstituteValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatchAndSubstituteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatchAndSubstitute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatchAndSubstituteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatchAndSubstituteValidationError{} + +var _RegexMatchAndSubstitute_Substitution_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher_GoogleRE2) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatcher_GoogleRE2MultiError, or nil if none found. 
+func (m *RegexMatcher_GoogleRE2) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher_GoogleRE2) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxProgramSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxProgramSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RegexMatcher_GoogleRE2MultiError(errors) + } + + return nil +} + +// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation +// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated +// constraints aren't met. +type RegexMatcher_GoogleRE2MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcher_GoogleRE2MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m } + +// RegexMatcher_GoogleRE2ValidationError is the validation error returned by +// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met. +type RegexMatcher_GoogleRE2ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string { + return "RegexMatcher_GoogleRE2ValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatcher_GoogleRE2ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher_GoogleRE2.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcher_GoogleRE2ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcher_GoogleRE2ValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go new file mode 100644 index 000000000..234f07193 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go @@ -0,0 +1,246 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RegexMatcher_GoogleRE2) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher_GoogleRE2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRE2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxProgramSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxProgramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Regex) > 0 { + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 
0x12 + } + if msg, ok := m.EngineType.(*RegexMatcher_GoogleRe2); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRe2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRe2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleRe2 != nil { + size, err := m.GoogleRe2.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RegexMatchAndSubstitute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatchAndSubstitute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatchAndSubstitute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Substitution) > 0 { + i -= len(m.Substitution) + copy(dAtA[i:], m.Substitution) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Substitution))) + i-- + dAtA[i] = 0x12 + } + if m.Pattern != nil { + size, err := m.Pattern.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRE2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxProgramSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxProgramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.EngineType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Regex) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher_GoogleRe2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleRe2 != nil { + l = m.GoogleRe2.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RegexMatchAndSubstitute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pattern != nil { + l = m.Pattern.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Substitution) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go new file mode 100644 index 000000000..3da1aae4e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go @@ -0,0 +1,204 @@ 
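Editor's note (illustrative only): the regex_vtproto.pb.go file added above compiles only under the vtprotobuf build tag and supplies SizeVT and MarshalVTStrict for these messages as a faster alternative to the reflection-based proto.Marshal path. A minimal sketch of calling them, assuming the module is built with -tags vtprotobuf; without that tag these methods are simply absent.

```go
//go:build vtprotobuf

// Illustrative sketch only: size and marshal a RegexMatcher using the
// vtprotobuf-generated helpers. Requires building with -tags vtprotobuf.
package main

import (
	"fmt"

	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

func main() {
	m := &matcherv3.RegexMatcher{
		EngineType: &matcherv3.RegexMatcher_GoogleRe2{
			GoogleRe2: &matcherv3.RegexMatcher_GoogleRE2{},
		},
		Regex: "^[0-9]+$",
	}
	// SizeVT pre-computes the wire size; MarshalVTStrict serializes into a
	// freshly allocated buffer without going through protobuf reflection.
	fmt.Printf("wire size: %d bytes\n", m.SizeVT())
	raw, err := m.MarshalVTStrict()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Printf("marshaled %d bytes\n", len(raw))
}
```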
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Match input indicates that matching should be done on the response status +// code. +type HttpResponseStatusCodeMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeMatchInput) Reset() { + *x = HttpResponseStatusCodeMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseStatusCodeMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{0} +} + +// Match input indicates that the matching should be done on the class of the +// response status code. For eg: 1xx, 2xx, 3xx, 4xx or 5xx. +type HttpResponseStatusCodeClassMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeClassMatchInput) Reset() { + *x = HttpResponseStatusCodeClassMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeClassMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeClassMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeClassMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeClassMatchInput.ProtoReflect.Descriptor instead. 
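Editor's note (illustrative only): HttpResponseStatusCodeMatchInput and HttpResponseStatusCodeClassMatchInput, defined above, carry no fields; they act as markers that a matcher configuration is assumed to reference purely by type URL as typed match inputs. A minimal sketch that packs one into an Any to show that URL:

```go
// Illustrative sketch only: pack a field-less match-input message into an
// Any and print the type URL a matcher configuration would reference.
package main

import (
	"fmt"

	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	input, err := anypb.New(&matcherv3.HttpResponseStatusCodeClassMatchInput{})
	if err != nil {
		fmt.Println("failed to pack match input:", err)
		return
	}
	// Expected form:
	// type.googleapis.com/envoy.type.matcher.v3.HttpResponseStatusCodeClassMatchInput
	fmt.Println("typed input:", input.GetTypeUrl())
}
```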
+func (*HttpResponseStatusCodeClassMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{1} +} + +var File_envoy_type_matcher_v3_status_code_input_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x20, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x27, 0x0a, 0x25, 0x48, 0x74, 0x74, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x14, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = file_envoy_type_matcher_v3_status_code_input_proto_rawDesc +) + +func file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_status_code_input_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescData +} + +var file_envoy_type_matcher_v3_status_code_input_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_status_code_input_proto_goTypes = []interface{}{ + (*HttpResponseStatusCodeMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpResponseStatusCodeMatchInput + (*HttpResponseStatusCodeClassMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpResponseStatusCodeClassMatchInput +} +var file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // 
[0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_status_code_input_proto_init() } +func file_envoy_type_matcher_v3_status_code_input_proto_init() { + if File_envoy_type_matcher_v3_status_code_input_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeClassMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_status_code_input_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_status_code_input_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_status_code_input_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_status_code_input_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_status_code_input_proto = out.File + file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = nil + file_envoy_type_matcher_v3_status_code_input_proto_goTypes = nil + file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go new file mode 100644 index 000000000..b09b90c13 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go @@ -0,0 +1,247 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *HttpResponseStatusCodeMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeMatchInputMultiError, or nil if none found. 
+func (m *HttpResponseStatusCodeMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeMatchInputMultiError is an error wrapping multiple +// validation errors returned by +// HttpResponseStatusCodeMatchInput.ValidateAll() if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseStatusCodeMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeMatchInput.Validate if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseStatusCodeMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeMatchInputValidationError{} + +// Validate checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpResponseStatusCodeClassMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeClassMatchInputMultiError, or nil if none found. 
+func (m *HttpResponseStatusCodeClassMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeClassMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeClassMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeClassMatchInputMultiError is an error wrapping +// multiple validation errors returned by +// HttpResponseStatusCodeClassMatchInput.ValidateAll() if the designated +// constraints aren't met. +type HttpResponseStatusCodeClassMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseStatusCodeClassMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeClassMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeClassMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeClassMatchInput.Validate if the +// designated constraints aren't met. +type HttpResponseStatusCodeClassMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseStatusCodeClassMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeClassMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeClassMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeClassMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeClassMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeClassMatchInputValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go new file mode 100644 index 000000000..156377f50 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go @@ -0,0 +1,104 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpResponseStatusCodeMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseStatusCodeClassMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go new file mode 100644 index 000000000..2ebed9084 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go @@ -0,0 +1,400 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a string. 
+// [#next-free-field: 9] +type StringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *StringMatcher_Exact + // *StringMatcher_Prefix + // *StringMatcher_Suffix + // *StringMatcher_SafeRegex + // *StringMatcher_Contains + // *StringMatcher_Custom + MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` + // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This + // has no effect for the safe_regex match. + // For example, the matcher “data“ will match both input string “Data“ and “data“ if set to true. + IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` +} + +func (x *StringMatcher) Reset() { + *x = StringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMatcher) ProtoMessage() {} + +func (x *StringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. +func (*StringMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{0} +} + +func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *StringMatcher) GetExact() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { + return x.Exact + } + return "" +} + +func (x *StringMatcher) GetPrefix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *StringMatcher) GetSuffix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { + return x.Suffix + } + return "" +} + +func (x *StringMatcher) GetSafeRegex() *RegexMatcher { + if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *StringMatcher) GetContains() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok { + return x.Contains + } + return "" +} + +func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig { + if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok { + return x.Custom + } + return nil +} + +func (x *StringMatcher) GetIgnoreCase() bool { + if x != nil { + return x.IgnoreCase + } + return false +} + +type isStringMatcher_MatchPattern interface { + isStringMatcher_MatchPattern() +} + +type StringMatcher_Exact struct { + // The input string must match exactly the string specified here. + // + // Examples: + // + // * “abc“ only matches the value “abc“. + Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` +} + +type StringMatcher_Prefix struct { + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. 
+ // + // Examples: + // + // * “abc“ matches the value “abc.xyz“ + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` +} + +type StringMatcher_Suffix struct { + // The input string must have the suffix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * “abc“ matches the value “xyz.abc“ + Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` +} + +type StringMatcher_SafeRegex struct { + // The input string must match the regular expression specified here. + SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type StringMatcher_Contains struct { + // The input string must have the substring specified here. + // Note: empty contains match is not allowed, please use regex instead. + // + // Examples: + // + // * “abc“ matches the value “xyz.abc.def“ + Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` +} + +type StringMatcher_Custom struct { + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {} + +// Specifies a list of ways to match a string. +type ListStringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *ListStringMatcher) Reset() { + *x = ListStringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListStringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListStringMatcher) ProtoMessage() {} + +func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. 
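Editor's note (illustrative only): StringMatcher selects exactly one of exact, prefix, suffix, safe_regex, contains, or custom via the match_pattern oneof, with ignore_case applying to the exact/prefix/suffix/contains forms but not to safe_regex, and ListStringMatcher groups several matchers. A minimal sketch with arbitrary example values, validated with the protoc-gen-validate code added later in this diff:

```go
// Illustrative sketch only: a case-insensitive prefix StringMatcher plus a
// ListStringMatcher grouping it with an exact match.
package main

import (
	"fmt"

	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

func main() {
	prefix := &matcherv3.StringMatcher{
		MatchPattern: &matcherv3.StringMatcher_Prefix{Prefix: "x-internal-"},
		// Honored for exact/prefix/suffix/contains; no effect for safe_regex.
		IgnoreCase: true,
	}
	if err := prefix.Validate(); err != nil {
		fmt.Println("invalid string matcher:", err)
		return
	}
	list := &matcherv3.ListStringMatcher{
		Patterns: []*matcherv3.StringMatcher{
			prefix,
			{MatchPattern: &matcherv3.StringMatcher_Exact{Exact: "content-type"}},
		},
	}
	fmt.Println("patterns configured:", len(list.GetPatterns()))
}
```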
+func (*ListStringMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{1} +} + +func (x *ListStringMatcher) GetPatterns() []*StringMatcher { + if x != nil { + return x.Patterns + } + return nil +} + +var File_envoy_type_matcher_v3_string_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_string_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, + 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, + 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 
0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, + 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_string_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_string_proto_rawDescData = file_envoy_type_matcher_v3_string_proto_rawDesc +) + +func file_envoy_type_matcher_v3_string_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_string_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_string_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_string_proto_rawDescData +} + +var file_envoy_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_string_proto_goTypes = []interface{}{ + (*StringMatcher)(nil), // 0: envoy.type.matcher.v3.StringMatcher + (*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher + (*RegexMatcher)(nil), // 2: envoy.type.matcher.v3.RegexMatcher + (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig +} +var file_envoy_type_matcher_v3_string_proto_depIdxs = []int32{ + 2, // 0: envoy.type.matcher.v3.StringMatcher.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher + 3, // 1: 
envoy.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig + 0, // 2: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_string_proto_init() } +func file_envoy_type_matcher_v3_string_proto_init() { + if File_envoy_type_matcher_v3_string_proto != nil { + return + } + file_envoy_type_matcher_v3_regex_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StringMatcher_Exact)(nil), + (*StringMatcher_Prefix)(nil), + (*StringMatcher_Suffix)(nil), + (*StringMatcher_SafeRegex)(nil), + (*StringMatcher_Contains)(nil), + (*StringMatcher_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_string_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_string_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_string_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_string_proto = out.File + file_envoy_type_matcher_v3_string_proto_rawDesc = nil + file_envoy_type_matcher_v3_string_proto_goTypes = nil + file_envoy_type_matcher_v3_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go new file mode 100644 index 000000000..98e3925f6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go @@ -0,0 +1,482 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StringMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *StringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StringMatcherMultiError, or +// nil if none found. +func (m *StringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for IgnoreCase + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *StringMatcher_Exact: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + case *StringMatcher_Prefix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetPrefix()) < 1 { + err := StringMatcherValidationError{ + field: "Prefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Suffix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetSuffix()) < 1 { + err := StringMatcherValidationError{ + field: "Suffix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_SafeRegex: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if m.GetSafeRegex() == nil { + err := StringMatcherValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StringMatcher_Contains: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetContains()) < 1 { + err := StringMatcherValidationError{ + field: "Contains", + reason: "value 
length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Custom: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetCustom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StringMatcherMultiError(errors) + } + + return nil +} + +// StringMatcherMultiError is an error wrapping multiple validation errors +// returned by StringMatcher.ValidateAll() if the designated constraints +// aren't met. +type StringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StringMatcherMultiError) AllErrors() []error { return m } + +// StringMatcherValidationError is the validation error returned by +// StringMatcher.Validate if the designated constraints aren't met. +type StringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StringMatcherValidationError{} + +// Validate checks the field values on ListStringMatcher with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListStringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListStringMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListStringMatcherMultiError, or nil if none found. +func (m *ListStringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListStringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPatterns()) < 1 { + err := ListStringMatcherValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListStringMatcherMultiError(errors) + } + + return nil +} + +// ListStringMatcherMultiError is an error wrapping multiple validation errors +// returned by ListStringMatcher.ValidateAll() if the designated constraints +// aren't met. +type ListStringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListStringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListStringMatcherMultiError) AllErrors() []error { return m } + +// ListStringMatcherValidationError is the validation error returned by +// ListStringMatcher.Validate if the designated constraints aren't met. +type ListStringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListStringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListStringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListStringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListStringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListStringMatcherValidationError) ErrorName() string { + return "ListStringMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ListStringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListStringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListStringMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go new file mode 100644 index 000000000..9c016e2e7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go @@ -0,0 +1,370 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*StringMatcher_Custom); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Contains); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.IgnoreCase { + i-- + if m.IgnoreCase { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.MatchPattern.(*StringMatcher_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Suffix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m 
*StringMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Exact) + copy(dAtA[i:], m.Exact) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Exact))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StringMatcher_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *StringMatcher_Suffix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Suffix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *StringMatcher_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + size, err := m.SafeRegex.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *StringMatcher_Contains) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Contains) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Contains) + copy(dAtA[i:], m.Contains) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Contains))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *StringMatcher_Custom) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Custom) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Custom != nil { + if vtmsg, ok := interface{}(m.Custom).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Custom) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ListStringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStringMatcher) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListStringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IgnoreCase { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *StringMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Exact) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Suffix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Suffix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_SafeRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + l = m.SafeRegex.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StringMatcher_Contains) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Contains) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Custom) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Custom != nil { + if size, ok := interface{}(m.Custom).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Custom) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListStringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go new file mode 100644 index 000000000..ef844bc7f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go @@ -0,0 +1,329 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses “path“ to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +type StructMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path to retrieve the Value from the Struct. + Path []*StructMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + // The StructMatcher is matched if the value retrieved by path is matched to this value. + Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *StructMatcher) Reset() { + *x = StructMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructMatcher) ProtoMessage() {} + +func (x *StructMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructMatcher.ProtoReflect.Descriptor instead. +func (*StructMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *StructMatcher) GetPath() []*StructMatcher_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +func (x *StructMatcher) GetValue() *ValueMatcher { + if x != nil { + return x.Value + } + return nil +} + +// Specifies the segment in a path to retrieve value from Struct. 
+type StructMatcher_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *StructMatcher_PathSegment_Key + Segment isStructMatcher_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *StructMatcher_PathSegment) Reset() { + *x = StructMatcher_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructMatcher_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructMatcher_PathSegment) ProtoMessage() {} + +func (x *StructMatcher_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructMatcher_PathSegment.ProtoReflect.Descriptor instead. +func (*StructMatcher_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *StructMatcher_PathSegment) GetSegment() isStructMatcher_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *StructMatcher_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*StructMatcher_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isStructMatcher_PathSegment_Segment interface { + isStructMatcher_PathSegment_Segment() +} + +type StructMatcher_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*StructMatcher_PathSegment_Key) isStructMatcher_PathSegment_Segment() {} + +var File_envoy_type_matcher_v3_struct_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_struct_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x02, 0x0a, 0x0d, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x6f, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x33, 0x9a, 0xc5, + 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_struct_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_struct_proto_rawDescData = file_envoy_type_matcher_v3_struct_proto_rawDesc +) + +func file_envoy_type_matcher_v3_struct_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_struct_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_struct_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_struct_proto_rawDescData +} + +var file_envoy_type_matcher_v3_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_struct_proto_goTypes = []interface{}{ + (*StructMatcher)(nil), // 0: envoy.type.matcher.v3.StructMatcher + (*StructMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.StructMatcher.PathSegment + (*ValueMatcher)(nil), // 2: envoy.type.matcher.v3.ValueMatcher +} +var file_envoy_type_matcher_v3_struct_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.StructMatcher.path:type_name -> envoy.type.matcher.v3.StructMatcher.PathSegment + 2, // 1: envoy.type.matcher.v3.StructMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_struct_proto_init() } +func file_envoy_type_matcher_v3_struct_proto_init() { + if File_envoy_type_matcher_v3_struct_proto != nil { + return + } + file_envoy_type_matcher_v3_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructMatcher_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*StructMatcher_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_struct_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_struct_proto_depIdxs, + MessageInfos: 
file_envoy_type_matcher_v3_struct_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_struct_proto = out.File + file_envoy_type_matcher_v3_struct_proto_rawDesc = nil + file_envoy_type_matcher_v3_struct_proto_goTypes = nil + file_envoy_type_matcher_v3_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go new file mode 100644 index 000000000..d69c1547f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go @@ -0,0 +1,364 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StructMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StructMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StructMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StructMatcherMultiError, or +// nil if none found. 
+func (m *StructMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StructMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPath()) < 1 { + err := StructMatcherValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetValue() == nil { + err := StructMatcherValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StructMatcherMultiError(errors) + } + + return nil +} + +// StructMatcherMultiError is an error wrapping multiple validation errors +// returned by StructMatcher.ValidateAll() if the designated constraints +// aren't met. +type StructMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StructMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StructMatcherMultiError) AllErrors() []error { return m } + +// StructMatcherValidationError is the validation error returned by +// StructMatcher.Validate if the designated constraints aren't met. +type StructMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StructMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StructMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StructMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e StructMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StructMatcherValidationError) ErrorName() string { return "StructMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StructMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStructMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StructMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StructMatcherValidationError{} + +// Validate checks the field values on StructMatcher_PathSegment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *StructMatcher_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StructMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StructMatcher_PathSegmentMultiError, or nil if none found. +func (m *StructMatcher_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *StructMatcher_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *StructMatcher_PathSegment_Key: + if v == nil { + err := StructMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := StructMatcher_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := StructMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StructMatcher_PathSegmentMultiError(errors) + } + + return nil +} + +// StructMatcher_PathSegmentMultiError is an error wrapping multiple validation +// errors returned by StructMatcher_PathSegment.ValidateAll() if the +// designated constraints aren't met. +type StructMatcher_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StructMatcher_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StructMatcher_PathSegmentMultiError) AllErrors() []error { return m } + +// StructMatcher_PathSegmentValidationError is the validation error returned by +// StructMatcher_PathSegment.Validate if the designated constraints aren't met. +type StructMatcher_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e StructMatcher_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StructMatcher_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StructMatcher_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StructMatcher_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StructMatcher_PathSegmentValidationError) ErrorName() string { + return "StructMatcher_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e StructMatcher_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStructMatcher_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StructMatcher_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StructMatcher_PathSegmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go new file mode 100644 index 000000000..d36052b83 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go @@ -0,0 +1,171 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StructMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*StructMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StructMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StructMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StructMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go new file mode 100644 index 000000000..7ba125cf3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go @@ -0,0 +1,552 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 8] +type ValueMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies how to match a value. + // + // Types that are assignable to MatchPattern: + // + // *ValueMatcher_NullMatch_ + // *ValueMatcher_DoubleMatch + // *ValueMatcher_StringMatch + // *ValueMatcher_BoolMatch + // *ValueMatcher_PresentMatch + // *ValueMatcher_ListMatch + // *ValueMatcher_OrMatch + MatchPattern isValueMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *ValueMatcher) Reset() { + *x = ValueMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValueMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValueMatcher) ProtoMessage() {} + +func (x *ValueMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValueMatcher.ProtoReflect.Descriptor instead. 
+func (*ValueMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0} +} + +func (m *ValueMatcher) GetMatchPattern() isValueMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *ValueMatcher) GetNullMatch() *ValueMatcher_NullMatch { + if x, ok := x.GetMatchPattern().(*ValueMatcher_NullMatch_); ok { + return x.NullMatch + } + return nil +} + +func (x *ValueMatcher) GetDoubleMatch() *DoubleMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_DoubleMatch); ok { + return x.DoubleMatch + } + return nil +} + +func (x *ValueMatcher) GetStringMatch() *StringMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *ValueMatcher) GetBoolMatch() bool { + if x, ok := x.GetMatchPattern().(*ValueMatcher_BoolMatch); ok { + return x.BoolMatch + } + return false +} + +func (x *ValueMatcher) GetPresentMatch() bool { + if x, ok := x.GetMatchPattern().(*ValueMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +func (x *ValueMatcher) GetListMatch() *ListMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_ListMatch); ok { + return x.ListMatch + } + return nil +} + +func (x *ValueMatcher) GetOrMatch() *OrMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_OrMatch); ok { + return x.OrMatch + } + return nil +} + +type isValueMatcher_MatchPattern interface { + isValueMatcher_MatchPattern() +} + +type ValueMatcher_NullMatch_ struct { + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch *ValueMatcher_NullMatch `protobuf:"bytes,1,opt,name=null_match,json=nullMatch,proto3,oneof"` +} + +type ValueMatcher_DoubleMatch struct { + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatch *DoubleMatcher `protobuf:"bytes,2,opt,name=double_match,json=doubleMatch,proto3,oneof"` +} + +type ValueMatcher_StringMatch struct { + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatch *StringMatcher `protobuf:"bytes,3,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type ValueMatcher_BoolMatch struct { + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + BoolMatch bool `protobuf:"varint,4,opt,name=bool_match,json=boolMatch,proto3,oneof"` +} + +type ValueMatcher_PresentMatch struct { + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + PresentMatch bool `protobuf:"varint,5,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +type ValueMatcher_ListMatch struct { + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatch *ListMatcher `protobuf:"bytes,6,opt,name=list_match,json=listMatch,proto3,oneof"` +} + +type ValueMatcher_OrMatch struct { + // If specified, a match occurs if and only if any of the alternatives in the match accept the value. 
+ OrMatch *OrMatcher `protobuf:"bytes,7,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +func (*ValueMatcher_NullMatch_) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_DoubleMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_StringMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_BoolMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_PresentMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_ListMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_OrMatch) isValueMatcher_MatchPattern() {} + +// Specifies the way to match a list value. +type ListMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *ListMatcher_OneOf + MatchPattern isListMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *ListMatcher) Reset() { + *x = ListMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMatcher) ProtoMessage() {} + +func (x *ListMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMatcher.ProtoReflect.Descriptor instead. +func (*ListMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{1} +} + +func (m *ListMatcher) GetMatchPattern() isListMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *ListMatcher) GetOneOf() *ValueMatcher { + if x, ok := x.GetMatchPattern().(*ListMatcher_OneOf); ok { + return x.OneOf + } + return nil +} + +type isListMatcher_MatchPattern interface { + isListMatcher_MatchPattern() +} + +type ListMatcher_OneOf struct { + // If specified, at least one of the values in the list must match the value specified. + OneOf *ValueMatcher `protobuf:"bytes,1,opt,name=one_of,json=oneOf,proto3,oneof"` +} + +func (*ListMatcher_OneOf) isListMatcher_MatchPattern() {} + +// Specifies a list of alternatives for the match. +type OrMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValueMatchers []*ValueMatcher `protobuf:"bytes,1,rep,name=value_matchers,json=valueMatchers,proto3" json:"value_matchers,omitempty"` +} + +func (x *OrMatcher) Reset() { + *x = OrMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrMatcher) ProtoMessage() {} + +func (x *OrMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrMatcher.ProtoReflect.Descriptor instead. 
+func (*OrMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{2} +} + +func (x *OrMatcher) GetValueMatchers() []*ValueMatcher { + if x != nil { + return x.ValueMatchers + } + return nil +} + +// NullMatch is an empty message to specify a null value. +type ValueMatcher_NullMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValueMatcher_NullMatch) Reset() { + *x = ValueMatcher_NullMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValueMatcher_NullMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValueMatcher_NullMatch) ProtoMessage() {} + +func (x *ValueMatcher_NullMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValueMatcher_NullMatch.ProtoReflect.Descriptor instead. +func (*ValueMatcher_NullMatch) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0, 0} +} + +var File_envoy_type_matcher_v3_value_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_value_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x04, + 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, + 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 
0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x0a, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x3d, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x3d, 0x0a, 0x09, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x30, 0x9a, + 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, + 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x88, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3c, 0x0a, + 0x06, 0x6f, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x6e, 0x65, 0x4f, 0x66, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x61, 0x0a, 0x09, 0x4f, 0x72, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x0d, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x42, 0x83, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_value_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_value_proto_rawDescData = file_envoy_type_matcher_v3_value_proto_rawDesc +) + +func file_envoy_type_matcher_v3_value_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_value_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_value_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_value_proto_rawDescData +} + +var file_envoy_type_matcher_v3_value_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_type_matcher_v3_value_proto_goTypes = []interface{}{ + (*ValueMatcher)(nil), // 0: envoy.type.matcher.v3.ValueMatcher + (*ListMatcher)(nil), // 1: envoy.type.matcher.v3.ListMatcher + (*OrMatcher)(nil), // 2: envoy.type.matcher.v3.OrMatcher + (*ValueMatcher_NullMatch)(nil), // 3: envoy.type.matcher.v3.ValueMatcher.NullMatch + (*DoubleMatcher)(nil), // 4: envoy.type.matcher.v3.DoubleMatcher + (*StringMatcher)(nil), // 5: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_value_proto_depIdxs = []int32{ + 3, // 0: envoy.type.matcher.v3.ValueMatcher.null_match:type_name -> envoy.type.matcher.v3.ValueMatcher.NullMatch + 4, // 1: envoy.type.matcher.v3.ValueMatcher.double_match:type_name -> envoy.type.matcher.v3.DoubleMatcher + 5, // 2: envoy.type.matcher.v3.ValueMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // 3: envoy.type.matcher.v3.ValueMatcher.list_match:type_name -> envoy.type.matcher.v3.ListMatcher + 2, // 4: envoy.type.matcher.v3.ValueMatcher.or_match:type_name -> envoy.type.matcher.v3.OrMatcher + 0, // 5: envoy.type.matcher.v3.ListMatcher.one_of:type_name -> envoy.type.matcher.v3.ValueMatcher + 0, // 6: envoy.type.matcher.v3.OrMatcher.value_matchers:type_name -> envoy.type.matcher.v3.ValueMatcher + 
7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_value_proto_init() } +func file_envoy_type_matcher_v3_value_proto_init() { + if File_envoy_type_matcher_v3_value_proto != nil { + return + } + file_envoy_type_matcher_v3_number_proto_init() + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValueMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValueMatcher_NullMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ValueMatcher_NullMatch_)(nil), + (*ValueMatcher_DoubleMatch)(nil), + (*ValueMatcher_StringMatch)(nil), + (*ValueMatcher_BoolMatch)(nil), + (*ValueMatcher_PresentMatch)(nil), + (*ValueMatcher_ListMatch)(nil), + (*ValueMatcher_OrMatch)(nil), + } + file_envoy_type_matcher_v3_value_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ListMatcher_OneOf)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_value_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_value_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_value_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_value_proto = out.File + file_envoy_type_matcher_v3_value_proto_rawDesc = nil + file_envoy_type_matcher_v3_value_proto_goTypes = nil + file_envoy_type_matcher_v3_value_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go new file mode 100644 index 000000000..9814aa0db --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go @@ -0,0 +1,791 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ValueMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ValueMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValueMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ValueMatcherMultiError, or +// nil if none found. +func (m *ValueMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ValueMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *ValueMatcher_NullMatch_: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetNullMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNullMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_DoubleMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetDoubleMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDoubleMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_StringMatch: + if v == nil { + err := 
ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_BoolMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for BoolMatch + case *ValueMatcher_PresentMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for PresentMatch + case *ValueMatcher_ListMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetListMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_OrMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != 
nil { + return ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ValueMatcherMultiError(errors) + } + + return nil +} + +// ValueMatcherMultiError is an error wrapping multiple validation errors +// returned by ValueMatcher.ValidateAll() if the designated constraints aren't met. +type ValueMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValueMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValueMatcherMultiError) AllErrors() []error { return m } + +// ValueMatcherValidationError is the validation error returned by +// ValueMatcher.Validate if the designated constraints aren't met. +type ValueMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValueMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValueMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValueMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValueMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ValueMatcherValidationError) ErrorName() string { return "ValueMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e ValueMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValueMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValueMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValueMatcherValidationError{} + +// Validate checks the field values on ListMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListMatcherMultiError, or +// nil if none found. 
+func (m *ListMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *ListMatcher_OneOf: + if v == nil { + err := ListMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetOneOf()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOneOf()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := ListMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ListMatcherMultiError(errors) + } + + return nil +} + +// ListMatcherMultiError is an error wrapping multiple validation errors +// returned by ListMatcher.ValidateAll() if the designated constraints aren't met. +type ListMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListMatcherMultiError) AllErrors() []error { return m } + +// ListMatcherValidationError is the validation error returned by +// ListMatcher.Validate if the designated constraints aren't met. +type ListMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListMatcherValidationError) ErrorName() string { return "ListMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e ListMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListMatcherValidationError{} + +// Validate checks the field values on OrMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrMatcherMultiError, or nil +// if none found. +func (m *OrMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *OrMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetValueMatchers()) < 2 { + err := OrMatcherValidationError{ + field: "ValueMatchers", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetValueMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return OrMatcherMultiError(errors) + } + + return nil +} + +// OrMatcherMultiError is an error wrapping multiple validation errors returned +// by OrMatcher.ValidateAll() if the designated constraints aren't met. +type OrMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrMatcherMultiError) AllErrors() []error { return m } + +// OrMatcherValidationError is the validation error returned by +// OrMatcher.Validate if the designated constraints aren't met. +type OrMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OrMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrMatcherValidationError) ErrorName() string { return "OrMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e OrMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrMatcherValidationError{} + +// Validate checks the field values on ValueMatcher_NullMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ValueMatcher_NullMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValueMatcher_NullMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ValueMatcher_NullMatchMultiError, or nil if none found. +func (m *ValueMatcher_NullMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *ValueMatcher_NullMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ValueMatcher_NullMatchMultiError(errors) + } + + return nil +} + +// ValueMatcher_NullMatchMultiError is an error wrapping multiple validation +// errors returned by ValueMatcher_NullMatch.ValidateAll() if the designated +// constraints aren't met. +type ValueMatcher_NullMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValueMatcher_NullMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValueMatcher_NullMatchMultiError) AllErrors() []error { return m } + +// ValueMatcher_NullMatchValidationError is the validation error returned by +// ValueMatcher_NullMatch.Validate if the designated constraints aren't met. +type ValueMatcher_NullMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValueMatcher_NullMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValueMatcher_NullMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValueMatcher_NullMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValueMatcher_NullMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ValueMatcher_NullMatchValidationError) ErrorName() string { + return "ValueMatcher_NullMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e ValueMatcher_NullMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValueMatcher_NullMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValueMatcher_NullMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValueMatcher_NullMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go new file mode 100644 index 000000000..852f5cead --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go @@ -0,0 +1,545 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ValueMatcher_NullMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher_NullMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ValueMatcher_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_ListMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_BoolMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_DoubleMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_NullMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NullMatch != nil { + size, err := m.NullMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_DoubleMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_DoubleMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleMatch != nil { + size, err := m.DoubleMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_BoolMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_BoolMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_ListMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*ValueMatcher_ListMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListMatch != nil { + size, err := m.ListMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *ListMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ListMatcher_OneOf); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListMatcher_OneOf) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher_OneOf) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OneOf != nil { + size, err := m.OneOf.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *OrMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ValueMatchers) > 0 { + for iNdEx := len(m.ValueMatchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ValueMatchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher_NullMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NullMatch != nil { + l = m.NullMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_DoubleMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleMatch != nil { + l = m.DoubleMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_BoolMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_ListMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListMatch != nil { + l = m.ListMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListMatcher_OneOf) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OneOf != nil { + l = m.OneOf.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValueMatchers) > 0 { + for _, e := range m.ValueMatchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go new file mode 100644 index 000000000..7a6ac07a5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go @@ -0,0 +1,683 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MetadataKey provides a general interface using “key“ and “path“ to retrieve value from +// :ref:`Metadata `. +// +// For example, for the following Metadata: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.xxx: +// prop: +// foo: bar +// xyz: +// hello: envoy +// +// The following MetadataKey will retrieve a string value "bar" from the Metadata. +// +// .. code-block:: yaml +// +// key: envoy.xxx +// path: +// - key: prop +// - key: foo +type MetadataKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key name of Metadata to retrieve the Struct from the metadata. + // Typically, it represents a builtin subsystem or custom extension. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The path to retrieve the Value from the Struct. It can be a prefix or a full path, + // e.g. “[prop, xyz]“ for a struct or “[prop, foo]“ for a string in the example, + // which depends on the particular scenario. + // + // Note: Due to that only the key type segment is supported, the path can not specify a list + // unless the list is the last segment. + Path []*MetadataKey_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` +} + +func (x *MetadataKey) Reset() { + *x = MetadataKey{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKey) ProtoMessage() {} + +func (x *MetadataKey) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKey.ProtoReflect.Descriptor instead. +func (*MetadataKey) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *MetadataKey) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataKey) GetPath() []*MetadataKey_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +// Describes what kind of metadata. 
+type MetadataKind struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *MetadataKind_Request_ + // *MetadataKind_Route_ + // *MetadataKind_Cluster_ + // *MetadataKind_Host_ + Kind isMetadataKind_Kind `protobuf_oneof:"kind"` +} + +func (x *MetadataKind) Reset() { + *x = MetadataKind{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind) ProtoMessage() {} + +func (x *MetadataKind) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind.ProtoReflect.Descriptor instead. +func (*MetadataKind) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1} +} + +func (m *MetadataKind) GetKind() isMetadataKind_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *MetadataKind) GetRequest() *MetadataKind_Request { + if x, ok := x.GetKind().(*MetadataKind_Request_); ok { + return x.Request + } + return nil +} + +func (x *MetadataKind) GetRoute() *MetadataKind_Route { + if x, ok := x.GetKind().(*MetadataKind_Route_); ok { + return x.Route + } + return nil +} + +func (x *MetadataKind) GetCluster() *MetadataKind_Cluster { + if x, ok := x.GetKind().(*MetadataKind_Cluster_); ok { + return x.Cluster + } + return nil +} + +func (x *MetadataKind) GetHost() *MetadataKind_Host { + if x, ok := x.GetKind().(*MetadataKind_Host_); ok { + return x.Host + } + return nil +} + +type isMetadataKind_Kind interface { + isMetadataKind_Kind() +} + +type MetadataKind_Request_ struct { + // Request kind of metadata. + Request *MetadataKind_Request `protobuf:"bytes,1,opt,name=request,proto3,oneof"` +} + +type MetadataKind_Route_ struct { + // Route kind of metadata. + Route *MetadataKind_Route `protobuf:"bytes,2,opt,name=route,proto3,oneof"` +} + +type MetadataKind_Cluster_ struct { + // Cluster kind of metadata. + Cluster *MetadataKind_Cluster `protobuf:"bytes,3,opt,name=cluster,proto3,oneof"` +} + +type MetadataKind_Host_ struct { + // Host kind of metadata. + Host *MetadataKind_Host `protobuf:"bytes,4,opt,name=host,proto3,oneof"` +} + +func (*MetadataKind_Request_) isMetadataKind_Kind() {} + +func (*MetadataKind_Route_) isMetadataKind_Kind() {} + +func (*MetadataKind_Cluster_) isMetadataKind_Kind() {} + +func (*MetadataKind_Host_) isMetadataKind_Kind() {} + +// Specifies the segment in a path to retrieve value from Metadata. +// Currently it is only supported to specify the key, i.e. field name, as one segment of a path. 
+type MetadataKey_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *MetadataKey_PathSegment_Key + Segment isMetadataKey_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *MetadataKey_PathSegment) Reset() { + *x = MetadataKey_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKey_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKey_PathSegment) ProtoMessage() {} + +func (x *MetadataKey_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKey_PathSegment.ProtoReflect.Descriptor instead. +func (*MetadataKey_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *MetadataKey_PathSegment) GetSegment() isMetadataKey_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *MetadataKey_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*MetadataKey_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isMetadataKey_PathSegment_Segment interface { + isMetadataKey_PathSegment_Segment() +} + +type MetadataKey_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. + Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*MetadataKey_PathSegment_Key) isMetadataKey_PathSegment_Segment() {} + +// Represents dynamic metadata associated with the request. +type MetadataKind_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Request) Reset() { + *x = MetadataKind_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Request) ProtoMessage() {} + +func (x *MetadataKind_Request) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Request.ProtoReflect.Descriptor instead. +func (*MetadataKind_Request) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 0} +} + +// Represents metadata from :ref:`the route`. 
+type MetadataKind_Route struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Route) Reset() { + *x = MetadataKind_Route{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Route) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Route) ProtoMessage() {} + +func (x *MetadataKind_Route) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Route.ProtoReflect.Descriptor instead. +func (*MetadataKind_Route) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 1} +} + +// Represents metadata from :ref:`the upstream cluster`. +type MetadataKind_Cluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Cluster) Reset() { + *x = MetadataKind_Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Cluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Cluster) ProtoMessage() {} + +func (x *MetadataKind_Cluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Cluster.ProtoReflect.Descriptor instead. +func (*MetadataKind_Cluster) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 2} +} + +// Represents metadata from :ref:`the upstream +// host`. +type MetadataKind_Host struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Host) Reset() { + *x = MetadataKind_Host{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Host) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Host) ProtoMessage() {} + +func (x *MetadataKind_Host) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Host.ProtoReflect.Descriptor instead. 
+func (*MetadataKind_Host) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 3} +} + +var File_envoy_type_metadata_v3_metadata_proto protoreflect.FileDescriptor + +var file_envoy_type_metadata_v3_metadata_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x0b, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x1a, 0x71, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x65, 0x79, 0x22, 0xd2, 0x04, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x69, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, + 0x05, 0x72, 0x6f, 0x75, 0x74, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, + 0x6e, 0x64, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x12, 0x48, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, + 0x48, 0x6f, 0x73, 0x74, 0x48, 0x00, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x1a, 0x3d, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x69, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x0a, 0x05, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x1a, 0x3d, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x2f, 0x9a, + 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x2a, + 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x42, 0x0b, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x89, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_metadata_v3_metadata_proto_rawDescOnce sync.Once + file_envoy_type_metadata_v3_metadata_proto_rawDescData = file_envoy_type_metadata_v3_metadata_proto_rawDesc +) + +func file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP() []byte { + file_envoy_type_metadata_v3_metadata_proto_rawDescOnce.Do(func() { + file_envoy_type_metadata_v3_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_metadata_v3_metadata_proto_rawDescData) + }) + return file_envoy_type_metadata_v3_metadata_proto_rawDescData +} + +var file_envoy_type_metadata_v3_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_type_metadata_v3_metadata_proto_goTypes = []interface{}{ + (*MetadataKey)(nil), // 0: envoy.type.metadata.v3.MetadataKey + (*MetadataKind)(nil), // 1: envoy.type.metadata.v3.MetadataKind + (*MetadataKey_PathSegment)(nil), // 2: envoy.type.metadata.v3.MetadataKey.PathSegment + (*MetadataKind_Request)(nil), // 3: envoy.type.metadata.v3.MetadataKind.Request + (*MetadataKind_Route)(nil), // 4: envoy.type.metadata.v3.MetadataKind.Route + (*MetadataKind_Cluster)(nil), // 5: envoy.type.metadata.v3.MetadataKind.Cluster + (*MetadataKind_Host)(nil), // 6: envoy.type.metadata.v3.MetadataKind.Host +} +var file_envoy_type_metadata_v3_metadata_proto_depIdxs = []int32{ + 2, // 0: envoy.type.metadata.v3.MetadataKey.path:type_name -> envoy.type.metadata.v3.MetadataKey.PathSegment + 3, // 1: envoy.type.metadata.v3.MetadataKind.request:type_name -> envoy.type.metadata.v3.MetadataKind.Request + 4, // 2: envoy.type.metadata.v3.MetadataKind.route:type_name -> envoy.type.metadata.v3.MetadataKind.Route + 5, // 3: envoy.type.metadata.v3.MetadataKind.cluster:type_name -> envoy.type.metadata.v3.MetadataKind.Cluster + 6, // 4: envoy.type.metadata.v3.MetadataKind.host:type_name -> envoy.type.metadata.v3.MetadataKind.Host + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_type_metadata_v3_metadata_proto_init() } +func file_envoy_type_metadata_v3_metadata_proto_init() { + if File_envoy_type_metadata_v3_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_metadata_v3_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKey_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Route); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Host); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MetadataKind_Request_)(nil), + (*MetadataKind_Route_)(nil), + (*MetadataKind_Cluster_)(nil), + (*MetadataKind_Host_)(nil), + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*MetadataKey_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_metadata_v3_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_metadata_v3_metadata_proto_goTypes, + DependencyIndexes: file_envoy_type_metadata_v3_metadata_proto_depIdxs, + MessageInfos: file_envoy_type_metadata_v3_metadata_proto_msgTypes, + }.Build() + File_envoy_type_metadata_v3_metadata_proto = out.File + file_envoy_type_metadata_v3_metadata_proto_rawDesc = nil + file_envoy_type_metadata_v3_metadata_proto_goTypes = nil + file_envoy_type_metadata_v3_metadata_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go new file mode 100644 index 000000000..adc8c8ed5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go @@ -0,0 +1,1025 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MetadataKey with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *MetadataKey) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKey with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataKeyMultiError, or +// nil if none found. +func (m *MetadataKey) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKey) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataKeyValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetPath()) < 1 { + err := MetadataKeyValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MetadataKeyMultiError(errors) + } + + return nil +} + +// MetadataKeyMultiError is an error wrapping multiple validation errors +// returned by MetadataKey.ValidateAll() if the designated constraints aren't met. +type MetadataKeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKeyMultiError) AllErrors() []error { return m } + +// MetadataKeyValidationError is the validation error returned by +// MetadataKey.Validate if the designated constraints aren't met. +type MetadataKeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
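// Illustrative usage sketch, not part of the vendored file: it assumes the
// generated metadatav3 package above and shows the constraints enforced by
// MetadataKey.validate (non-empty Key, at least one PathSegment). Validate
// returns only the first violation; ValidateAll wraps every violation in a
// MetadataKeyMultiError.
package main

import (
	"fmt"

	metadatav3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
)

func main() {
	key := &metadatav3.MetadataKey{
		Key: "envoy.lb",
		Path: []*metadatav3.MetadataKey_PathSegment{
			{Segment: &metadatav3.MetadataKey_PathSegment_Key{Key: "canary"}},
		},
	}
	fmt.Println(key.Validate()) // nil: both constraints are satisfied

	empty := &metadatav3.MetadataKey{} // violates both constraints
	fmt.Println(empty.Validate())      // reports only the first violation
	if err := empty.ValidateAll(); err != nil {
		// MetadataKeyMultiError lists every violation.
		fmt.Println(err.(metadatav3.MetadataKeyMultiError).AllErrors())
	}
}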
+func (e MetadataKeyValidationError) ErrorName() string { return "MetadataKeyValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataKeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKey.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKeyValidationError{} + +// Validate checks the field values on MetadataKind with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MetadataKind) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataKindMultiError, or +// nil if none found. +func (m *MetadataKind) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofKindPresent := false + switch v := m.Kind.(type) { + case *MetadataKind_Request_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Route_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetRoute()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoute()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Cluster_: + if v == nil { + err := 
MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Host_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofKindPresent { + err := MetadataKindValidationError{ + field: "Kind", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataKindMultiError(errors) + } + + return nil +} + +// MetadataKindMultiError is an error wrapping multiple validation errors +// returned by MetadataKind.ValidateAll() if the designated constraints aren't met. +type MetadataKindMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKindMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKindMultiError) AllErrors() []error { return m } + +// MetadataKindValidationError is the validation error returned by +// MetadataKind.Validate if the designated constraints aren't met. +type MetadataKindValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKindValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKindValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKindValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e MetadataKindValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataKindValidationError) ErrorName() string { return "MetadataKindValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataKindValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKindValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKindValidationError{} + +// Validate checks the field values on MetadataKey_PathSegment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKey_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKey_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKey_PathSegmentMultiError, or nil if none found. +func (m *MetadataKey_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKey_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *MetadataKey_PathSegment_Key: + if v == nil { + err := MetadataKey_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataKey_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := MetadataKey_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataKey_PathSegmentMultiError(errors) + } + + return nil +} + +// MetadataKey_PathSegmentMultiError is an error wrapping multiple validation +// errors returned by MetadataKey_PathSegment.ValidateAll() if the designated +// constraints aren't met. +type MetadataKey_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKey_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKey_PathSegmentMultiError) AllErrors() []error { return m } + +// MetadataKey_PathSegmentValidationError is the validation error returned by +// MetadataKey_PathSegment.Validate if the designated constraints aren't met. +type MetadataKey_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKey_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e MetadataKey_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKey_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKey_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataKey_PathSegmentValidationError) ErrorName() string { + return "MetadataKey_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKey_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKey_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKey_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKey_PathSegmentValidationError{} + +// Validate checks the field values on MetadataKind_Request with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Request) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Request with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_RequestMultiError, or nil if none found. +func (m *MetadataKind_Request) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Request) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_RequestMultiError(errors) + } + + return nil +} + +// MetadataKind_RequestMultiError is an error wrapping multiple validation +// errors returned by MetadataKind_Request.ValidateAll() if the designated +// constraints aren't met. +type MetadataKind_RequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_RequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_RequestMultiError) AllErrors() []error { return m } + +// MetadataKind_RequestValidationError is the validation error returned by +// MetadataKind_Request.Validate if the designated constraints aren't met. +type MetadataKind_RequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_RequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_RequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_RequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_RequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_RequestValidationError) ErrorName() string { + return "MetadataKind_RequestValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_RequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Request.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_RequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_RequestValidationError{} + +// Validate checks the field values on MetadataKind_Route with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Route) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Route with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_RouteMultiError, or nil if none found. +func (m *MetadataKind_Route) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Route) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_RouteMultiError(errors) + } + + return nil +} + +// MetadataKind_RouteMultiError is an error wrapping multiple validation errors +// returned by MetadataKind_Route.ValidateAll() if the designated constraints +// aren't met. +type MetadataKind_RouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_RouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_RouteMultiError) AllErrors() []error { return m } + +// MetadataKind_RouteValidationError is the validation error returned by +// MetadataKind_Route.Validate if the designated constraints aren't met. +type MetadataKind_RouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_RouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_RouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_RouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_RouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_RouteValidationError) ErrorName() string { + return "MetadataKind_RouteValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_RouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Route.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_RouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_RouteValidationError{} + +// Validate checks the field values on MetadataKind_Cluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Cluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Cluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_ClusterMultiError, or nil if none found. +func (m *MetadataKind_Cluster) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Cluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_ClusterMultiError(errors) + } + + return nil +} + +// MetadataKind_ClusterMultiError is an error wrapping multiple validation +// errors returned by MetadataKind_Cluster.ValidateAll() if the designated +// constraints aren't met. +type MetadataKind_ClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_ClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_ClusterMultiError) AllErrors() []error { return m } + +// MetadataKind_ClusterValidationError is the validation error returned by +// MetadataKind_Cluster.Validate if the designated constraints aren't met. +type MetadataKind_ClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_ClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_ClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_ClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_ClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_ClusterValidationError) ErrorName() string { + return "MetadataKind_ClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_ClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Cluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_ClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_ClusterValidationError{} + +// Validate checks the field values on MetadataKind_Host with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Host) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Host with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_HostMultiError, or nil if none found. +func (m *MetadataKind_Host) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Host) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_HostMultiError(errors) + } + + return nil +} + +// MetadataKind_HostMultiError is an error wrapping multiple validation errors +// returned by MetadataKind_Host.ValidateAll() if the designated constraints +// aren't met. +type MetadataKind_HostMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_HostMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_HostMultiError) AllErrors() []error { return m } + +// MetadataKind_HostValidationError is the validation error returned by +// MetadataKind_Host.Validate if the designated constraints aren't met. +type MetadataKind_HostValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_HostValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_HostValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_HostValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_HostValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_HostValidationError) ErrorName() string { + return "MetadataKind_HostValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_HostValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Host.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_HostValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_HostValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go new file mode 100644 index 000000000..efbf1efc3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go @@ -0,0 +1,563 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataKey_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataKey_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKey_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var 
l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Request) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Host) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Host) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m 
*MetadataKind) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Kind.(*MetadataKind_Host_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Cluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Route_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Request_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Route_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Cluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cluster != nil { + size, err := m.Cluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Host_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Host != nil { + size, err := m.Host.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *MetadataKey_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKey_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Host) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Kind.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Route_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Cluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Host_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Host != nil { + l = m.Host.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go new file mode 100644 index 000000000..388e4749e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go @@ -0,0 +1,609 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
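// Illustrative sketch, not part of the vendored file: the MarshalVTStrict and
// SizeVT helpers above are generated by protoc-gen-go-vtproto and compiled
// only when the vtprotobuf build tag is set (go build -tags vtprotobuf); they
// encode without protobuf reflection.
//go:build vtprotobuf

package main

import (
	"fmt"

	metadatav3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
)

func main() {
	key := &metadatav3.MetadataKey{
		Key: "envoy.lb",
		Path: []*metadatav3.MetadataKey_PathSegment{
			{Segment: &metadatav3.MetadataKey_PathSegment_Key{Key: "canary"}},
		},
	}
	// SizeVT pre-computes the wire size; MarshalVTStrict allocates exactly
	// that many bytes and fills them back-to-front.
	fmt.Println(key.SizeVT())
	b, err := key.MarshalVTStrict()
	fmt.Println(len(b), err)
}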
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes custom tags for the active span. +// [#next-free-field: 6] +type CustomTag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Used to populate the tag name. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Used to specify what kind of custom tag. + // + // Types that are assignable to Type: + // + // *CustomTag_Literal_ + // *CustomTag_Environment_ + // *CustomTag_RequestHeader + // *CustomTag_Metadata_ + Type isCustomTag_Type `protobuf_oneof:"type"` +} + +func (x *CustomTag) Reset() { + *x = CustomTag{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag) ProtoMessage() {} + +func (x *CustomTag) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag.ProtoReflect.Descriptor instead. +func (*CustomTag) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomTag) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (m *CustomTag) GetType() isCustomTag_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *CustomTag) GetLiteral() *CustomTag_Literal { + if x, ok := x.GetType().(*CustomTag_Literal_); ok { + return x.Literal + } + return nil +} + +func (x *CustomTag) GetEnvironment() *CustomTag_Environment { + if x, ok := x.GetType().(*CustomTag_Environment_); ok { + return x.Environment + } + return nil +} + +func (x *CustomTag) GetRequestHeader() *CustomTag_Header { + if x, ok := x.GetType().(*CustomTag_RequestHeader); ok { + return x.RequestHeader + } + return nil +} + +func (x *CustomTag) GetMetadata() *CustomTag_Metadata { + if x, ok := x.GetType().(*CustomTag_Metadata_); ok { + return x.Metadata + } + return nil +} + +type isCustomTag_Type interface { + isCustomTag_Type() +} + +type CustomTag_Literal_ struct { + // A literal custom tag. + Literal *CustomTag_Literal `protobuf:"bytes,2,opt,name=literal,proto3,oneof"` +} + +type CustomTag_Environment_ struct { + // An environment custom tag. + Environment *CustomTag_Environment `protobuf:"bytes,3,opt,name=environment,proto3,oneof"` +} + +type CustomTag_RequestHeader struct { + // A request header custom tag. 
+ RequestHeader *CustomTag_Header `protobuf:"bytes,4,opt,name=request_header,json=requestHeader,proto3,oneof"` +} + +type CustomTag_Metadata_ struct { + // A custom tag to obtain tag value from the metadata. + Metadata *CustomTag_Metadata `protobuf:"bytes,5,opt,name=metadata,proto3,oneof"` +} + +func (*CustomTag_Literal_) isCustomTag_Type() {} + +func (*CustomTag_Environment_) isCustomTag_Type() {} + +func (*CustomTag_RequestHeader) isCustomTag_Type() {} + +func (*CustomTag_Metadata_) isCustomTag_Type() {} + +// Literal type custom tag with static value for the tag value. +type CustomTag_Literal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Static literal value to populate the tag value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *CustomTag_Literal) Reset() { + *x = CustomTag_Literal{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Literal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Literal) ProtoMessage() {} + +func (x *CustomTag_Literal) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Literal.ProtoReflect.Descriptor instead. +func (*CustomTag_Literal) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CustomTag_Literal) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Environment type custom tag with environment name and default value. +type CustomTag_Environment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Environment variable name to obtain the value to populate the tag value. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // When the environment variable is not found, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Environment) Reset() { + *x = CustomTag_Environment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Environment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Environment) ProtoMessage() {} + +func (x *CustomTag_Environment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Environment.ProtoReflect.Descriptor instead. 
+func (*CustomTag_Environment) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *CustomTag_Environment) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CustomTag_Environment) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Header type custom tag with header name and default value. +type CustomTag_Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name to obtain the value to populate the tag value. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // When the header does not exist, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Header) Reset() { + *x = CustomTag_Header{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Header) ProtoMessage() {} + +func (x *CustomTag_Header) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Header.ProtoReflect.Descriptor instead. +func (*CustomTag_Header) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *CustomTag_Header) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CustomTag_Header) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Metadata type custom tag using +// :ref:`MetadataKey ` to retrieve the protobuf value +// from :ref:`Metadata `, and populate the tag value with +// `the canonical JSON `_ +// representation of it. +type CustomTag_Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify what kind of metadata to obtain tag value from. + Kind *v3.MetadataKind `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // Metadata key to define the path to retrieve the tag value. + MetadataKey *v3.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // When no valid metadata is found, + // the tag value would be populated with this default value if specified, + // otherwise no tag would be populated. 
+ DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Metadata) Reset() { + *x = CustomTag_Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Metadata) ProtoMessage() {} + +func (x *CustomTag_Metadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Metadata.ProtoReflect.Descriptor instead. +func (*CustomTag_Metadata) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *CustomTag_Metadata) GetKind() *v3.MetadataKind { + if x != nil { + return x.Kind + } + return nil +} + +func (x *CustomTag_Metadata) GetMetadataKey() *v3.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *CustomTag_Metadata) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +var File_envoy_type_tracing_v3_custom_tag_proto protoreflect.FileDescriptor + +var file_envoy_type_tracing_v3_custom_tag_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, + 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x61, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, + 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xd4, 0x07, 0x0a, 0x09, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x12, + 0x19, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x44, 0x0a, 0x07, 0x6c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x12, 0x50, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x50, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x58, 0x0a, + 0x07, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, + 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x83, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, + 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x7f, 0x0a, + 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xe2, + 0x01, 0x0a, 0x08, 0x4d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x52, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x42, 0x0b, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x87, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_tracing_v3_custom_tag_proto_rawDescOnce sync.Once + file_envoy_type_tracing_v3_custom_tag_proto_rawDescData = file_envoy_type_tracing_v3_custom_tag_proto_rawDesc +) + +func file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP() []byte { + file_envoy_type_tracing_v3_custom_tag_proto_rawDescOnce.Do(func() { + file_envoy_type_tracing_v3_custom_tag_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_tracing_v3_custom_tag_proto_rawDescData) + }) + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescData +} + +var file_envoy_type_tracing_v3_custom_tag_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_type_tracing_v3_custom_tag_proto_goTypes = []interface{}{ + (*CustomTag)(nil), // 0: envoy.type.tracing.v3.CustomTag + (*CustomTag_Literal)(nil), // 1: envoy.type.tracing.v3.CustomTag.Literal + (*CustomTag_Environment)(nil), // 2: envoy.type.tracing.v3.CustomTag.Environment + (*CustomTag_Header)(nil), // 3: 
envoy.type.tracing.v3.CustomTag.Header + (*CustomTag_Metadata)(nil), // 4: envoy.type.tracing.v3.CustomTag.Metadata + (*v3.MetadataKind)(nil), // 5: envoy.type.metadata.v3.MetadataKind + (*v3.MetadataKey)(nil), // 6: envoy.type.metadata.v3.MetadataKey +} +var file_envoy_type_tracing_v3_custom_tag_proto_depIdxs = []int32{ + 1, // 0: envoy.type.tracing.v3.CustomTag.literal:type_name -> envoy.type.tracing.v3.CustomTag.Literal + 2, // 1: envoy.type.tracing.v3.CustomTag.environment:type_name -> envoy.type.tracing.v3.CustomTag.Environment + 3, // 2: envoy.type.tracing.v3.CustomTag.request_header:type_name -> envoy.type.tracing.v3.CustomTag.Header + 4, // 3: envoy.type.tracing.v3.CustomTag.metadata:type_name -> envoy.type.tracing.v3.CustomTag.Metadata + 5, // 4: envoy.type.tracing.v3.CustomTag.Metadata.kind:type_name -> envoy.type.metadata.v3.MetadataKind + 6, // 5: envoy.type.tracing.v3.CustomTag.Metadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_type_tracing_v3_custom_tag_proto_init() } +func file_envoy_type_tracing_v3_custom_tag_proto_init() { + if File_envoy_type_tracing_v3_custom_tag_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Literal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Environment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CustomTag_Literal_)(nil), + (*CustomTag_Environment_)(nil), + (*CustomTag_RequestHeader)(nil), + (*CustomTag_Metadata_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_tracing_v3_custom_tag_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_tracing_v3_custom_tag_proto_goTypes, + DependencyIndexes: file_envoy_type_tracing_v3_custom_tag_proto_depIdxs, + MessageInfos: 
file_envoy_type_tracing_v3_custom_tag_proto_msgTypes, + }.Build() + File_envoy_type_tracing_v3_custom_tag_proto = out.File + file_envoy_type_tracing_v3_custom_tag_proto_rawDesc = nil + file_envoy_type_tracing_v3_custom_tag_proto_goTypes = nil + file_envoy_type_tracing_v3_custom_tag_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go new file mode 100644 index 000000000..d15de9b68 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go @@ -0,0 +1,847 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CustomTag with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CustomTag) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CustomTagMultiError, or nil +// if none found. 
+func (m *CustomTag) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetTag()) < 1 { + err := CustomTagValidationError{ + field: "Tag", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofTypePresent := false + switch v := m.Type.(type) { + case *CustomTag_Literal_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetLiteral()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLiteral()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CustomTag_Environment_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetEnvironment()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnvironment()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CustomTag_RequestHeader: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetRequestHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case 
*CustomTag_Metadata_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := CustomTagValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomTagMultiError(errors) + } + + return nil +} + +// CustomTagMultiError is an error wrapping multiple validation errors returned +// by CustomTag.ValidateAll() if the designated constraints aren't met. +type CustomTagMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTagMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTagMultiError) AllErrors() []error { return m } + +// CustomTagValidationError is the validation error returned by +// CustomTag.Validate if the designated constraints aren't met. +type CustomTagValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTagValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTagValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTagValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTagValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTagValidationError) ErrorName() string { return "CustomTagValidationError" } + +// Error satisfies the builtin error interface +func (e CustomTagValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTagValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTagValidationError{} + +// Validate checks the field values on CustomTag_Literal with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *CustomTag_Literal) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Literal with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_LiteralMultiError, or nil if none found. +func (m *CustomTag_Literal) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Literal) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetValue()) < 1 { + err := CustomTag_LiteralValidationError{ + field: "Value", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomTag_LiteralMultiError(errors) + } + + return nil +} + +// CustomTag_LiteralMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Literal.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_LiteralMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_LiteralMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_LiteralMultiError) AllErrors() []error { return m } + +// CustomTag_LiteralValidationError is the validation error returned by +// CustomTag_Literal.Validate if the designated constraints aren't met. +type CustomTag_LiteralValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_LiteralValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_LiteralValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_LiteralValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_LiteralValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_LiteralValidationError) ErrorName() string { + return "CustomTag_LiteralValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_LiteralValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Literal.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_LiteralValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_LiteralValidationError{} + +// Validate checks the field values on CustomTag_Environment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Environment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Environment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_EnvironmentMultiError, or nil if none found. 
+func (m *CustomTag_Environment) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Environment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CustomTag_EnvironmentValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_EnvironmentMultiError(errors) + } + + return nil +} + +// CustomTag_EnvironmentMultiError is an error wrapping multiple validation +// errors returned by CustomTag_Environment.ValidateAll() if the designated +// constraints aren't met. +type CustomTag_EnvironmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_EnvironmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_EnvironmentMultiError) AllErrors() []error { return m } + +// CustomTag_EnvironmentValidationError is the validation error returned by +// CustomTag_Environment.Validate if the designated constraints aren't met. +type CustomTag_EnvironmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_EnvironmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_EnvironmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_EnvironmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_EnvironmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_EnvironmentValidationError) ErrorName() string { + return "CustomTag_EnvironmentValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_EnvironmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Environment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_EnvironmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_EnvironmentValidationError{} + +// Validate checks the field values on CustomTag_Header with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Header) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Header with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_HeaderMultiError, or nil if none found. 
+func (m *CustomTag_Header) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Header) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CustomTag_HeaderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_CustomTag_Header_Name_Pattern.MatchString(m.GetName()) { + err := CustomTag_HeaderValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_HeaderMultiError(errors) + } + + return nil +} + +// CustomTag_HeaderMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Header.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_HeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_HeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_HeaderMultiError) AllErrors() []error { return m } + +// CustomTag_HeaderValidationError is the validation error returned by +// CustomTag_Header.Validate if the designated constraints aren't met. +type CustomTag_HeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_HeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_HeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_HeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_HeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_HeaderValidationError) ErrorName() string { return "CustomTag_HeaderValidationError" } + +// Error satisfies the builtin error interface +func (e CustomTag_HeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Header.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_HeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_HeaderValidationError{} + +var _CustomTag_Header_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on CustomTag_Metadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Metadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_MetadataMultiError, or nil if none found. 
+func (m *CustomTag_Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetKind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_MetadataMultiError(errors) + } + + return nil +} + +// CustomTag_MetadataMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Metadata.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_MetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_MetadataMultiError) AllErrors() []error { return m } + +// CustomTag_MetadataValidationError is the validation error returned by +// CustomTag_Metadata.Validate if the designated constraints aren't met. +type CustomTag_MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CustomTag_MetadataValidationError) ErrorName() string { + return "CustomTag_MetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Metadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_MetadataValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go new file mode 100644 index 000000000..e558c5d07 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go @@ -0,0 +1,556 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CustomTag_Literal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Literal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Environment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Environment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Kind != nil { + if vtmsg, ok := interface{}(m.Kind).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Kind) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*CustomTag_Metadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_RequestHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Environment_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Literal_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Tag) > 0 { + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Literal_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Literal != nil { + size, err := m.Literal.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Environment_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Environment != nil { + size, err := m.Environment.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CustomTag_RequestHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_RequestHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeader != nil { + size, err := m.RequestHeader.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Metadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - 
i, nil +} +func (m *CustomTag_Literal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Environment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + if size, ok := interface{}(m.Kind).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Kind) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Literal_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Literal != nil { + l = m.Literal.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Environment_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Environment != nil { + l = m.Environment.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_RequestHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeader != nil { + l = m.RequestHeader.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Metadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go new file mode 100644 index 000000000..af620911f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go @@ -0,0 +1,333 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the hash policy +type HashPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PolicySpecifier: + // + // *HashPolicy_SourceIp_ + // *HashPolicy_FilterState_ + PolicySpecifier isHashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"` +} + +func (x *HashPolicy) Reset() { + *x = HashPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy) ProtoMessage() {} + +func (x *HashPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy.ProtoReflect.Descriptor instead. +func (*HashPolicy) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0} +} + +func (m *HashPolicy) GetPolicySpecifier() isHashPolicy_PolicySpecifier { + if m != nil { + return m.PolicySpecifier + } + return nil +} + +func (x *HashPolicy) GetSourceIp() *HashPolicy_SourceIp { + if x, ok := x.GetPolicySpecifier().(*HashPolicy_SourceIp_); ok { + return x.SourceIp + } + return nil +} + +func (x *HashPolicy) GetFilterState() *HashPolicy_FilterState { + if x, ok := x.GetPolicySpecifier().(*HashPolicy_FilterState_); ok { + return x.FilterState + } + return nil +} + +type isHashPolicy_PolicySpecifier interface { + isHashPolicy_PolicySpecifier() +} + +type HashPolicy_SourceIp_ struct { + SourceIp *HashPolicy_SourceIp `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp,proto3,oneof"` +} + +type HashPolicy_FilterState_ struct { + FilterState *HashPolicy_FilterState `protobuf:"bytes,2,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +func (*HashPolicy_SourceIp_) isHashPolicy_PolicySpecifier() {} + +func (*HashPolicy_FilterState_) isHashPolicy_PolicySpecifier() {} + +// The source IP will be used to compute the hash used by hash-based load balancing +// algorithms. 
+type HashPolicy_SourceIp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HashPolicy_SourceIp) Reset() { + *x = HashPolicy_SourceIp{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy_SourceIp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy_SourceIp) ProtoMessage() {} + +func (x *HashPolicy_SourceIp) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy_SourceIp.ProtoReflect.Descriptor instead. +func (*HashPolicy_SourceIp) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0, 0} +} + +// An Object in the :ref:`filterState ` will be used +// to compute the hash used by hash-based load balancing algorithms. +type HashPolicy_FilterState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Object in the filterState, which is an Envoy::Hashable object. If there is no + // data associated with the key, or the stored object is not Envoy::Hashable, no hash will be + // produced. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *HashPolicy_FilterState) Reset() { + *x = HashPolicy_FilterState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy_FilterState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy_FilterState) ProtoMessage() {} + +func (x *HashPolicy_FilterState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy_FilterState.ProtoReflect.Descriptor instead. 
+func (*HashPolicy_FilterState) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *HashPolicy_FilterState) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +var File_envoy_type_v3_hash_policy_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_hash_policy_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x02, 0x0a, 0x0a, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x09, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, + 0x70, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x4a, 0x0a, + 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x31, 0x0a, 0x08, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x1a, 0x28, 0x0a, 0x0b, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x75, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, + 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_hash_policy_proto_rawDescOnce sync.Once + file_envoy_type_v3_hash_policy_proto_rawDescData = file_envoy_type_v3_hash_policy_proto_rawDesc +) + +func file_envoy_type_v3_hash_policy_proto_rawDescGZIP() []byte { + file_envoy_type_v3_hash_policy_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_hash_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_hash_policy_proto_rawDescData) + }) + return file_envoy_type_v3_hash_policy_proto_rawDescData +} + +var file_envoy_type_v3_hash_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_v3_hash_policy_proto_goTypes = []interface{}{ + (*HashPolicy)(nil), // 0: envoy.type.v3.HashPolicy + (*HashPolicy_SourceIp)(nil), // 1: envoy.type.v3.HashPolicy.SourceIp + (*HashPolicy_FilterState)(nil), // 2: envoy.type.v3.HashPolicy.FilterState +} +var file_envoy_type_v3_hash_policy_proto_depIdxs = []int32{ + 1, // 0: envoy.type.v3.HashPolicy.source_ip:type_name -> envoy.type.v3.HashPolicy.SourceIp + 2, // 1: envoy.type.v3.HashPolicy.filter_state:type_name -> envoy.type.v3.HashPolicy.FilterState + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_hash_policy_proto_init() } +func file_envoy_type_v3_hash_policy_proto_init() { + if File_envoy_type_v3_hash_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_hash_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy_SourceIp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy_FilterState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HashPolicy_SourceIp_)(nil), + (*HashPolicy_FilterState_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_hash_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_hash_policy_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_hash_policy_proto_depIdxs, + MessageInfos: file_envoy_type_v3_hash_policy_proto_msgTypes, + }.Build() + File_envoy_type_v3_hash_policy_proto = out.File + file_envoy_type_v3_hash_policy_proto_rawDesc = nil + file_envoy_type_v3_hash_policy_proto_goTypes = nil + 
file_envoy_type_v3_hash_policy_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go new file mode 100644 index 000000000..5ec37f540 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go @@ -0,0 +1,451 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HashPolicy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HashPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HashPolicyMultiError, or +// nil if none found. +func (m *HashPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { + case *HashPolicy_SourceIp_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetSourceIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HashPolicy_FilterState_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors 
= append(errors, HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HashPolicyMultiError(errors) + } + + return nil +} + +// HashPolicyMultiError is an error wrapping multiple validation errors +// returned by HashPolicy.ValidateAll() if the designated constraints aren't met. +type HashPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicyMultiError) AllErrors() []error { return m } + +// HashPolicyValidationError is the validation error returned by +// HashPolicy.Validate if the designated constraints aren't met. +type HashPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicyValidationError) ErrorName() string { return "HashPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e HashPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicyValidationError{} + +// Validate checks the field values on HashPolicy_SourceIp with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HashPolicy_SourceIp) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy_SourceIp with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HashPolicy_SourceIpMultiError, or nil if none found. 
+func (m *HashPolicy_SourceIp) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy_SourceIp) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HashPolicy_SourceIpMultiError(errors) + } + + return nil +} + +// HashPolicy_SourceIpMultiError is an error wrapping multiple validation +// errors returned by HashPolicy_SourceIp.ValidateAll() if the designated +// constraints aren't met. +type HashPolicy_SourceIpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicy_SourceIpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicy_SourceIpMultiError) AllErrors() []error { return m } + +// HashPolicy_SourceIpValidationError is the validation error returned by +// HashPolicy_SourceIp.Validate if the designated constraints aren't met. +type HashPolicy_SourceIpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicy_SourceIpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicy_SourceIpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicy_SourceIpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicy_SourceIpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicy_SourceIpValidationError) ErrorName() string { + return "HashPolicy_SourceIpValidationError" +} + +// Error satisfies the builtin error interface +func (e HashPolicy_SourceIpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy_SourceIp.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicy_SourceIpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicy_SourceIpValidationError{} + +// Validate checks the field values on HashPolicy_FilterState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HashPolicy_FilterState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy_FilterState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HashPolicy_FilterStateMultiError, or nil if none found. 
+func (m *HashPolicy_FilterState) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy_FilterState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := HashPolicy_FilterStateValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HashPolicy_FilterStateMultiError(errors) + } + + return nil +} + +// HashPolicy_FilterStateMultiError is an error wrapping multiple validation +// errors returned by HashPolicy_FilterState.ValidateAll() if the designated +// constraints aren't met. +type HashPolicy_FilterStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicy_FilterStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicy_FilterStateMultiError) AllErrors() []error { return m } + +// HashPolicy_FilterStateValidationError is the validation error returned by +// HashPolicy_FilterState.Validate if the designated constraints aren't met. +type HashPolicy_FilterStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicy_FilterStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicy_FilterStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicy_FilterStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicy_FilterStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicy_FilterStateValidationError) ErrorName() string { + return "HashPolicy_FilterStateValidationError" +} + +// Error satisfies the builtin error interface +func (e HashPolicy_FilterStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy_FilterState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicy_FilterStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicy_FilterStateValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go new file mode 100644 index 000000000..bcc199596 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go @@ -0,0 +1,251 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HashPolicy_SourceIp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_FilterState_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_SourceIp_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_SourceIp_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceIp != nil { + size, err := m.SourceIp.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_SourceIp_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + l = m.SourceIp.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go new file mode 100644 index 000000000..74f4e24df --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/http.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CodecClientType int32 + +const ( + CodecClientType_HTTP1 CodecClientType = 0 + CodecClientType_HTTP2 CodecClientType = 1 + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + CodecClientType_HTTP3 CodecClientType = 2 +) + +// Enum value maps for CodecClientType. 
+var ( + CodecClientType_name = map[int32]string{ + 0: "HTTP1", + 1: "HTTP2", + 2: "HTTP3", + } + CodecClientType_value = map[string]int32{ + "HTTP1": 0, + "HTTP2": 1, + "HTTP3": 2, + } +) + +func (x CodecClientType) Enum() *CodecClientType { + p := new(CodecClientType) + *p = x + return p +} + +func (x CodecClientType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodecClientType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_http_proto_enumTypes[0].Descriptor() +} + +func (CodecClientType) Type() protoreflect.EnumType { + return &file_envoy_type_v3_http_proto_enumTypes[0] +} + +func (x CodecClientType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CodecClientType.Descriptor instead. +func (CodecClientType) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_http_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_type_v3_http_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_http_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, 0x32, 0x0a, 0x0f, 0x43, 0x6f, 0x64, 0x65, + 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x31, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x02, 0x42, 0x6f, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_http_proto_rawDescOnce sync.Once + file_envoy_type_v3_http_proto_rawDescData = file_envoy_type_v3_http_proto_rawDesc +) + +func file_envoy_type_v3_http_proto_rawDescGZIP() []byte { + file_envoy_type_v3_http_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_proto_rawDescData) + }) + return file_envoy_type_v3_http_proto_rawDescData +} + +var file_envoy_type_v3_http_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_http_proto_goTypes = []interface{}{ + (CodecClientType)(0), // 0: envoy.type.v3.CodecClientType +} +var file_envoy_type_v3_http_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func 
init() { file_envoy_type_v3_http_proto_init() } +func file_envoy_type_v3_http_proto_init() { + if File_envoy_type_v3_http_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_http_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_http_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_http_proto_depIdxs, + EnumInfos: file_envoy_type_v3_http_proto_enumTypes, + }.Build() + File_envoy_type_v3_http_proto = out.File + file_envoy_type_v3_http_proto_rawDesc = nil + file_envoy_type_v3_http_proto_goTypes = nil + file_envoy_type_v3_http_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go new file mode 100644 index 000000000..e2c41e26f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/http.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go new file mode 100644 index 000000000..f7e952b3a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go @@ -0,0 +1,458 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HTTP response codes supported in Envoy. +// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +type StatusCode int32 + +const ( + // Empty - This code not part of the HTTP status code specification, but it is needed for proto + // `enum` type. 
+ StatusCode_Empty StatusCode = 0 + StatusCode_Continue StatusCode = 100 + StatusCode_OK StatusCode = 200 + StatusCode_Created StatusCode = 201 + StatusCode_Accepted StatusCode = 202 + StatusCode_NonAuthoritativeInformation StatusCode = 203 + StatusCode_NoContent StatusCode = 204 + StatusCode_ResetContent StatusCode = 205 + StatusCode_PartialContent StatusCode = 206 + StatusCode_MultiStatus StatusCode = 207 + StatusCode_AlreadyReported StatusCode = 208 + StatusCode_IMUsed StatusCode = 226 + StatusCode_MultipleChoices StatusCode = 300 + StatusCode_MovedPermanently StatusCode = 301 + StatusCode_Found StatusCode = 302 + StatusCode_SeeOther StatusCode = 303 + StatusCode_NotModified StatusCode = 304 + StatusCode_UseProxy StatusCode = 305 + StatusCode_TemporaryRedirect StatusCode = 307 + StatusCode_PermanentRedirect StatusCode = 308 + StatusCode_BadRequest StatusCode = 400 + StatusCode_Unauthorized StatusCode = 401 + StatusCode_PaymentRequired StatusCode = 402 + StatusCode_Forbidden StatusCode = 403 + StatusCode_NotFound StatusCode = 404 + StatusCode_MethodNotAllowed StatusCode = 405 + StatusCode_NotAcceptable StatusCode = 406 + StatusCode_ProxyAuthenticationRequired StatusCode = 407 + StatusCode_RequestTimeout StatusCode = 408 + StatusCode_Conflict StatusCode = 409 + StatusCode_Gone StatusCode = 410 + StatusCode_LengthRequired StatusCode = 411 + StatusCode_PreconditionFailed StatusCode = 412 + StatusCode_PayloadTooLarge StatusCode = 413 + StatusCode_URITooLong StatusCode = 414 + StatusCode_UnsupportedMediaType StatusCode = 415 + StatusCode_RangeNotSatisfiable StatusCode = 416 + StatusCode_ExpectationFailed StatusCode = 417 + StatusCode_MisdirectedRequest StatusCode = 421 + StatusCode_UnprocessableEntity StatusCode = 422 + StatusCode_Locked StatusCode = 423 + StatusCode_FailedDependency StatusCode = 424 + StatusCode_UpgradeRequired StatusCode = 426 + StatusCode_PreconditionRequired StatusCode = 428 + StatusCode_TooManyRequests StatusCode = 429 + StatusCode_RequestHeaderFieldsTooLarge StatusCode = 431 + StatusCode_InternalServerError StatusCode = 500 + StatusCode_NotImplemented StatusCode = 501 + StatusCode_BadGateway StatusCode = 502 + StatusCode_ServiceUnavailable StatusCode = 503 + StatusCode_GatewayTimeout StatusCode = 504 + StatusCode_HTTPVersionNotSupported StatusCode = 505 + StatusCode_VariantAlsoNegotiates StatusCode = 506 + StatusCode_InsufficientStorage StatusCode = 507 + StatusCode_LoopDetected StatusCode = 508 + StatusCode_NotExtended StatusCode = 510 + StatusCode_NetworkAuthenticationRequired StatusCode = 511 +) + +// Enum value maps for StatusCode. 
+var ( + StatusCode_name = map[int32]string{ + 0: "Empty", + 100: "Continue", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "NonAuthoritativeInformation", + 204: "NoContent", + 205: "ResetContent", + 206: "PartialContent", + 207: "MultiStatus", + 208: "AlreadyReported", + 226: "IMUsed", + 300: "MultipleChoices", + 301: "MovedPermanently", + 302: "Found", + 303: "SeeOther", + 304: "NotModified", + 305: "UseProxy", + 307: "TemporaryRedirect", + 308: "PermanentRedirect", + 400: "BadRequest", + 401: "Unauthorized", + 402: "PaymentRequired", + 403: "Forbidden", + 404: "NotFound", + 405: "MethodNotAllowed", + 406: "NotAcceptable", + 407: "ProxyAuthenticationRequired", + 408: "RequestTimeout", + 409: "Conflict", + 410: "Gone", + 411: "LengthRequired", + 412: "PreconditionFailed", + 413: "PayloadTooLarge", + 414: "URITooLong", + 415: "UnsupportedMediaType", + 416: "RangeNotSatisfiable", + 417: "ExpectationFailed", + 421: "MisdirectedRequest", + 422: "UnprocessableEntity", + 423: "Locked", + 424: "FailedDependency", + 426: "UpgradeRequired", + 428: "PreconditionRequired", + 429: "TooManyRequests", + 431: "RequestHeaderFieldsTooLarge", + 500: "InternalServerError", + 501: "NotImplemented", + 502: "BadGateway", + 503: "ServiceUnavailable", + 504: "GatewayTimeout", + 505: "HTTPVersionNotSupported", + 506: "VariantAlsoNegotiates", + 507: "InsufficientStorage", + 508: "LoopDetected", + 510: "NotExtended", + 511: "NetworkAuthenticationRequired", + } + StatusCode_value = map[string]int32{ + "Empty": 0, + "Continue": 100, + "OK": 200, + "Created": 201, + "Accepted": 202, + "NonAuthoritativeInformation": 203, + "NoContent": 204, + "ResetContent": 205, + "PartialContent": 206, + "MultiStatus": 207, + "AlreadyReported": 208, + "IMUsed": 226, + "MultipleChoices": 300, + "MovedPermanently": 301, + "Found": 302, + "SeeOther": 303, + "NotModified": 304, + "UseProxy": 305, + "TemporaryRedirect": 307, + "PermanentRedirect": 308, + "BadRequest": 400, + "Unauthorized": 401, + "PaymentRequired": 402, + "Forbidden": 403, + "NotFound": 404, + "MethodNotAllowed": 405, + "NotAcceptable": 406, + "ProxyAuthenticationRequired": 407, + "RequestTimeout": 408, + "Conflict": 409, + "Gone": 410, + "LengthRequired": 411, + "PreconditionFailed": 412, + "PayloadTooLarge": 413, + "URITooLong": 414, + "UnsupportedMediaType": 415, + "RangeNotSatisfiable": 416, + "ExpectationFailed": 417, + "MisdirectedRequest": 421, + "UnprocessableEntity": 422, + "Locked": 423, + "FailedDependency": 424, + "UpgradeRequired": 426, + "PreconditionRequired": 428, + "TooManyRequests": 429, + "RequestHeaderFieldsTooLarge": 431, + "InternalServerError": 500, + "NotImplemented": 501, + "BadGateway": 502, + "ServiceUnavailable": 503, + "GatewayTimeout": 504, + "HTTPVersionNotSupported": 505, + "VariantAlsoNegotiates": 506, + "InsufficientStorage": 507, + "LoopDetected": 508, + "NotExtended": 510, + "NetworkAuthenticationRequired": 511, + } +) + +func (x StatusCode) Enum() *StatusCode { + p := new(StatusCode) + *p = x + return p +} + +func (x StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_http_status_proto_enumTypes[0].Descriptor() +} + +func (StatusCode) Type() protoreflect.EnumType { + return &file_envoy_type_v3_http_status_proto_enumTypes[0] +} + +func (x StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StatusCode.Descriptor instead. 
+func (StatusCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0} +} + +// HTTP status. +type HttpStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Supplies HTTP response code. + Code StatusCode `protobuf:"varint,1,opt,name=code,proto3,enum=envoy.type.v3.StatusCode" json:"code,omitempty"` +} + +func (x *HttpStatus) Reset() { + *x = HttpStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_http_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpStatus) ProtoMessage() {} + +func (x *HttpStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_http_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpStatus.ProtoReflect.Descriptor instead. +func (*HttpStatus) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpStatus) GetCode() StatusCode { + if x != nil { + return x.Code + } + return StatusCode_Empty +} + +var File_envoy_type_v3_http_status_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_http_status_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x65, 0x0a, 0x0a, 0x48, + 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2a, 0xb5, 0x09, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x10, 0x64, 0x12, 0x07, 0x0a, 0x02, 0x4f, 0x4b, + 0x10, 0xc8, 0x01, 0x12, 0x0c, 0x0a, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x10, 0xc9, + 0x01, 0x12, 0x0d, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x10, 0xca, 0x01, + 0x12, 0x20, 0x0a, 0x1b, 0x4e, 0x6f, 0x6e, 
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, + 0xcb, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x4e, 0x6f, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, + 0xcc, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x10, 0xcd, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xce, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0xcf, 0x01, 0x12, 0x14, 0x0a, 0x0f, + 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, + 0xd0, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x4d, 0x55, 0x73, 0x65, 0x64, 0x10, 0xe2, 0x01, 0x12, + 0x14, 0x0a, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x43, 0x68, 0x6f, 0x69, 0x63, + 0x65, 0x73, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x50, 0x65, + 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x10, 0xad, 0x02, 0x12, 0x0a, 0x0a, 0x05, + 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xae, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x65, 0x65, 0x4f, + 0x74, 0x68, 0x65, 0x72, 0x10, 0xaf, 0x02, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0xb0, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x55, 0x73, 0x65, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x10, 0xb1, 0x02, 0x12, 0x16, 0x0a, 0x11, 0x54, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb3, 0x02, + 0x12, 0x16, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb4, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x90, 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x55, 0x6e, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x10, 0x91, 0x03, 0x12, 0x14, 0x0a, 0x0f, + 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, + 0x92, 0x03, 0x12, 0x0e, 0x0a, 0x09, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x10, + 0x93, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x94, + 0x03, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x6f, 0x74, 0x41, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x10, 0x95, 0x03, 0x12, 0x12, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x10, 0x96, 0x03, 0x12, 0x20, 0x0a, 0x1b, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0x97, 0x03, 0x12, 0x13, + 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x10, 0x98, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x10, + 0x99, 0x03, 0x12, 0x09, 0x0a, 0x04, 0x47, 0x6f, 0x6e, 0x65, 0x10, 0x9a, 0x03, 0x12, 0x13, 0x0a, + 0x0e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, + 0x9b, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x9c, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0x9d, + 0x03, 0x12, 0x0f, 0x0a, 0x0a, 0x55, 0x52, 0x49, 0x54, 0x6f, 0x6f, 
0x4c, 0x6f, 0x6e, 0x67, 0x10, + 0x9e, 0x03, 0x12, 0x19, 0x0a, 0x14, 0x55, 0x6e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x10, 0x9f, 0x03, 0x12, 0x18, 0x0a, + 0x13, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x6f, 0x74, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x10, 0xa0, 0x03, 0x12, 0x16, 0x0a, 0x11, 0x45, 0x78, 0x70, 0x65, 0x63, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0xa1, 0x03, 0x12, + 0x17, 0x0a, 0x12, 0x4d, 0x69, 0x73, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0xa5, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x6e, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x10, + 0xa6, 0x03, 0x12, 0x0b, 0x0a, 0x06, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x10, 0xa7, 0x03, 0x12, + 0x15, 0x0a, 0x10, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x10, 0xa8, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xaa, 0x03, 0x12, 0x19, 0x0a, 0x14, + 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x10, 0xac, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x54, 0x6f, 0x6f, 0x4d, 0x61, + 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x10, 0xad, 0x03, 0x12, 0x20, 0x0a, + 0x1b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0xaf, 0x03, 0x12, + 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xf4, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x6f, 0x74, + 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x10, 0xf5, 0x03, 0x12, 0x0f, + 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, 0xf6, 0x03, 0x12, + 0x17, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x10, 0xf7, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x10, 0xf8, 0x03, 0x12, 0x1c, 0x0a, + 0x17, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, 0xf9, 0x03, 0x12, 0x1a, 0x0a, 0x15, 0x56, + 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x73, 0x6f, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x65, 0x73, 0x10, 0xfa, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x73, 0x75, 0x66, + 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x10, 0xfb, + 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x10, 0xfc, 0x03, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x10, 0xfe, 0x03, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xff, 0x03, 0x42, 0x75, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 
0x76, + 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_http_status_proto_rawDescOnce sync.Once + file_envoy_type_v3_http_status_proto_rawDescData = file_envoy_type_v3_http_status_proto_rawDesc +) + +func file_envoy_type_v3_http_status_proto_rawDescGZIP() []byte { + file_envoy_type_v3_http_status_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_http_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_status_proto_rawDescData) + }) + return file_envoy_type_v3_http_status_proto_rawDescData +} + +var file_envoy_type_v3_http_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_http_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_http_status_proto_goTypes = []interface{}{ + (StatusCode)(0), // 0: envoy.type.v3.StatusCode + (*HttpStatus)(nil), // 1: envoy.type.v3.HttpStatus +} +var file_envoy_type_v3_http_status_proto_depIdxs = []int32{ + 0, // 0: envoy.type.v3.HttpStatus.code:type_name -> envoy.type.v3.StatusCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_http_status_proto_init() } +func file_envoy_type_v3_http_status_proto_init() { + if File_envoy_type_v3_http_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_http_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_http_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_http_status_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_http_status_proto_depIdxs, + EnumInfos: file_envoy_type_v3_http_status_proto_enumTypes, + MessageInfos: file_envoy_type_v3_http_status_proto_msgTypes, + }.Build() + File_envoy_type_v3_http_status_proto = out.File + file_envoy_type_v3_http_status_proto_rawDesc = nil + file_envoy_type_v3_http_status_proto_goTypes = nil + file_envoy_type_v3_http_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go new file mode 100644 index 000000000..d3f76e937 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go @@ -0,0 +1,162 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpStatus with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpStatusMultiError, or +// nil if none found. +func (m *HttpStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _HttpStatus_Code_NotInLookup[m.GetCode()]; ok { + err := HttpStatusValidationError{ + field: "Code", + reason: "value must not be in list [Empty]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := StatusCode_name[int32(m.GetCode())]; !ok { + err := HttpStatusValidationError{ + field: "Code", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpStatusMultiError(errors) + } + + return nil +} + +// HttpStatusMultiError is an error wrapping multiple validation errors +// returned by HttpStatus.ValidateAll() if the designated constraints aren't met. +type HttpStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpStatusMultiError) AllErrors() []error { return m } + +// HttpStatusValidationError is the validation error returned by +// HttpStatus.Validate if the designated constraints aren't met. +type HttpStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpStatusValidationError) ErrorName() string { return "HttpStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HttpStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpStatusValidationError{} + +var _HttpStatus_Code_NotInLookup = map[StatusCode]struct{}{ + 0: {}, +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go new file mode 100644 index 000000000..f25340d84 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go @@ -0,0 +1,70 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Code != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Code)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go new file mode 100644 index 000000000..45eb66186 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go @@ -0,0 +1,316 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Fraction percentages support several fixed denominator values. +type FractionalPercent_DenominatorType int32 + +const ( + // 100. + // + // **Example**: 1/100 = 1%. + FractionalPercent_HUNDRED FractionalPercent_DenominatorType = 0 + // 10,000. + // + // **Example**: 1/10000 = 0.01%. + FractionalPercent_TEN_THOUSAND FractionalPercent_DenominatorType = 1 + // 1,000,000. + // + // **Example**: 1/1000000 = 0.0001%. + FractionalPercent_MILLION FractionalPercent_DenominatorType = 2 +) + +// Enum value maps for FractionalPercent_DenominatorType. +var ( + FractionalPercent_DenominatorType_name = map[int32]string{ + 0: "HUNDRED", + 1: "TEN_THOUSAND", + 2: "MILLION", + } + FractionalPercent_DenominatorType_value = map[string]int32{ + "HUNDRED": 0, + "TEN_THOUSAND": 1, + "MILLION": 2, + } +) + +func (x FractionalPercent_DenominatorType) Enum() *FractionalPercent_DenominatorType { + p := new(FractionalPercent_DenominatorType) + *p = x + return p +} + +func (x FractionalPercent_DenominatorType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FractionalPercent_DenominatorType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_percent_proto_enumTypes[0].Descriptor() +} + +func (FractionalPercent_DenominatorType) Type() protoreflect.EnumType { + return &file_envoy_type_v3_percent_proto_enumTypes[0] +} + +func (x FractionalPercent_DenominatorType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FractionalPercent_DenominatorType.Descriptor instead. +func (FractionalPercent_DenominatorType) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1, 0} +} + +// Identifies a percentage, in the range [0.0, 100.0]. +type Percent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Percent) Reset() { + *x = Percent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_percent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Percent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Percent) ProtoMessage() {} + +func (x *Percent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_percent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Percent.ProtoReflect.Descriptor instead. +func (*Percent) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{0} +} + +func (x *Percent) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// A fractional percentage is used in cases in which for performance reasons performing floating +// point to integer conversions during randomness calculations is undesirable. The message includes +// both a numerator and denominator that together determine the final fractional value. +// +// * **Example**: 1/100 = 1%. +// * **Example**: 3/10000 = 0.03%. 
+type FractionalPercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the numerator. Defaults to 0. + Numerator uint32 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` + // Specifies the denominator. If the denominator specified is less than the numerator, the final + // fractional percentage is capped at 1 (100%). + Denominator FractionalPercent_DenominatorType `protobuf:"varint,2,opt,name=denominator,proto3,enum=envoy.type.v3.FractionalPercent_DenominatorType" json:"denominator,omitempty"` +} + +func (x *FractionalPercent) Reset() { + *x = FractionalPercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_percent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FractionalPercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FractionalPercent) ProtoMessage() {} + +func (x *FractionalPercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_percent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FractionalPercent.ProtoReflect.Descriptor instead. +func (*FractionalPercent) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1} +} + +func (x *FractionalPercent) GetNumerator() uint32 { + if x != nil { + return x.Numerator + } + return 0 +} + +func (x *FractionalPercent) GetDenominator() FractionalPercent_DenominatorType { + if x != nil { + return x.Denominator + } + return FractionalPercent_HUNDRED +} + +var File_envoy_type_v3_percent_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_percent_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x53, 0x0a, 0x07, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x19, 0x9a, 0xc5, 0x88, 0x1e, 0x14, 0x0a, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0xf3, 0x01, 0x0a, + 0x11, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x75, 0x6d, 
0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x5c, 0x0a, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3d, + 0x0a, 0x0f, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x55, 0x4e, 0x44, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x45, 0x4e, 0x5f, 0x54, 0x48, 0x4f, 0x55, 0x53, 0x41, 0x4e, 0x44, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x49, 0x4c, 0x4c, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x3a, 0x23, 0x9a, + 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x42, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_percent_proto_rawDescOnce sync.Once + file_envoy_type_v3_percent_proto_rawDescData = file_envoy_type_v3_percent_proto_rawDesc +) + +func file_envoy_type_v3_percent_proto_rawDescGZIP() []byte { + file_envoy_type_v3_percent_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_percent_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_percent_proto_rawDescData) + }) + return file_envoy_type_v3_percent_proto_rawDescData +} + +var file_envoy_type_v3_percent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_percent_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_v3_percent_proto_goTypes = []interface{}{ + (FractionalPercent_DenominatorType)(0), // 0: envoy.type.v3.FractionalPercent.DenominatorType + (*Percent)(nil), // 1: envoy.type.v3.Percent + (*FractionalPercent)(nil), // 2: envoy.type.v3.FractionalPercent +} +var file_envoy_type_v3_percent_proto_depIdxs = []int32{ + 0, // 0: envoy.type.v3.FractionalPercent.denominator:type_name -> envoy.type.v3.FractionalPercent.DenominatorType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_percent_proto_init() } +func file_envoy_type_v3_percent_proto_init() { + if File_envoy_type_v3_percent_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_envoy_type_v3_percent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Percent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_percent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FractionalPercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_percent_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_percent_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_percent_proto_depIdxs, + EnumInfos: file_envoy_type_v3_percent_proto_enumTypes, + MessageInfos: file_envoy_type_v3_percent_proto_msgTypes, + }.Build() + File_envoy_type_v3_percent_proto = out.File + file_envoy_type_v3_percent_proto_rawDesc = nil + file_envoy_type_v3_percent_proto_goTypes = nil + file_envoy_type_v3_percent_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go new file mode 100644 index 000000000..2929f39f8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go @@ -0,0 +1,261 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Percent with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Percent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Percent with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PercentMultiError, or nil if none found. +func (m *Percent) ValidateAll() error { + return m.validate(true) +} + +func (m *Percent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetValue(); val < 0 || val > 100 { + err := PercentValidationError{ + field: "Value", + reason: "value must be inside range [0, 100]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PercentMultiError(errors) + } + + return nil +} + +// PercentMultiError is an error wrapping multiple validation errors returned +// by Percent.ValidateAll() if the designated constraints aren't met. +type PercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PercentMultiError) AllErrors() []error { return m } + +// PercentValidationError is the validation error returned by Percent.Validate +// if the designated constraints aren't met. +type PercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PercentValidationError) ErrorName() string { return "PercentValidationError" } + +// Error satisfies the builtin error interface +func (e PercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PercentValidationError{} + +// Validate checks the field values on FractionalPercent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *FractionalPercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FractionalPercent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FractionalPercentMultiError, or nil if none found. +func (m *FractionalPercent) ValidateAll() error { + return m.validate(true) +} + +func (m *FractionalPercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Numerator + + if _, ok := FractionalPercent_DenominatorType_name[int32(m.GetDenominator())]; !ok { + err := FractionalPercentValidationError{ + field: "Denominator", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FractionalPercentMultiError(errors) + } + + return nil +} + +// FractionalPercentMultiError is an error wrapping multiple validation errors +// returned by FractionalPercent.ValidateAll() if the designated constraints +// aren't met. +type FractionalPercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FractionalPercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FractionalPercentMultiError) AllErrors() []error { return m } + +// FractionalPercentValidationError is the validation error returned by +// FractionalPercent.Validate if the designated constraints aren't met. 
+type FractionalPercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FractionalPercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FractionalPercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FractionalPercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FractionalPercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FractionalPercentValidationError) ErrorName() string { + return "FractionalPercentValidationError" +} + +// Error satisfies the builtin error interface +func (e FractionalPercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFractionalPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FractionalPercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FractionalPercentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go new file mode 100644 index 000000000..82c60c5d9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go @@ -0,0 +1,132 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Percent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Percent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Percent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FractionalPercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Denominator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Denominator)) + i-- + dAtA[i] = 0x10 + } + if m.Numerator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Numerator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Percent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *FractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Numerator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Numerator)) + } + if m.Denominator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Denominator)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go new file mode 100644 index 000000000..63be48f3c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the int64 start and end of the range using half-open interval semantics [start, +// end). 
+type Int64Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int64Range) Reset() { + *x = Int64Range{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Range) ProtoMessage() {} + +func (x *Int64Range) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead. +func (*Int64Range) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64Range) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int64Range) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +// Specifies the int32 start and end of the range using half-open interval semantics [start, +// end). +type Int32Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int32Range) Reset() { + *x = Int32Range{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Range) ProtoMessage() {} + +func (x *Int32Range) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead. +func (*Int32Range) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32Range) GetStart() int32 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int32Range) GetEnd() int32 { + if x != nil { + return x.End + } + return 0 +} + +// Specifies the double start and end of the range using half-open interval semantics [start, +// end). 
+type DoubleRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *DoubleRange) Reset() { + *x = DoubleRange{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRange) ProtoMessage() {} + +func (x *DoubleRange) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead. +func (*DoubleRange) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRange) GetStart() float64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *DoubleRange) GetEnd() float64 { + if x != nil { + return x.End + } + return 0 +} + +var File_envoy_type_v3_range_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x22, 0x52, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x22, 0x54, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 
0x65, 0x6e, 0x64, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, + 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x70, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_range_proto_rawDescOnce sync.Once + file_envoy_type_v3_range_proto_rawDescData = file_envoy_type_v3_range_proto_rawDesc +) + +func file_envoy_type_v3_range_proto_rawDescGZIP() []byte { + file_envoy_type_v3_range_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_range_proto_rawDescData) + }) + return file_envoy_type_v3_range_proto_rawDescData +} + +var file_envoy_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_v3_range_proto_goTypes = []interface{}{ + (*Int64Range)(nil), // 0: envoy.type.v3.Int64Range + (*Int32Range)(nil), // 1: envoy.type.v3.Int32Range + (*DoubleRange)(nil), // 2: envoy.type.v3.DoubleRange +} +var file_envoy_type_v3_range_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_range_proto_init() } +func file_envoy_type_v3_range_proto_init() { + if File_envoy_type_v3_range_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_range_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_range_proto_depIdxs, + MessageInfos: file_envoy_type_v3_range_proto_msgTypes, + }.Build() + File_envoy_type_v3_range_proto = out.File + file_envoy_type_v3_range_proto_rawDesc = nil + 
file_envoy_type_v3_range_proto_goTypes = nil + file_envoy_type_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go new file mode 100644 index 000000000..6bf697e9b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go @@ -0,0 +1,346 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int64Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int64RangeMultiError, or +// nil if none found. +func (m *Int64Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int64RangeMultiError(errors) + } + + return nil +} + +// Int64RangeMultiError is an error wrapping multiple validation errors +// returned by Int64Range.ValidateAll() if the designated constraints aren't met. +type Int64RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMultiError) AllErrors() []error { return m } + +// Int64RangeValidationError is the validation error returned by +// Int64Range.Validate if the designated constraints aren't met. +type Int64RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int64RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeValidationError{} + +// Validate checks the field values on Int32Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int32Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int32RangeMultiError, or +// nil if none found. +func (m *Int32Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int32RangeMultiError(errors) + } + + return nil +} + +// Int32RangeMultiError is an error wrapping multiple validation errors +// returned by Int32Range.ValidateAll() if the designated constraints aren't met. +type Int32RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMultiError) AllErrors() []error { return m } + +// Int32RangeValidationError is the validation error returned by +// Int32Range.Validate if the designated constraints aren't met. +type Int32RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int32RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeValidationError{} + +// Validate checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleRangeMultiError, or +// nil if none found. +func (m *DoubleRange) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return DoubleRangeMultiError(errors) + } + + return nil +} + +// DoubleRangeMultiError is an error wrapping multiple validation errors +// returned by DoubleRange.ValidateAll() if the designated constraints aren't met. +type DoubleRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMultiError) AllErrors() []error { return m } + +// DoubleRangeValidationError is the validation error returned by +// DoubleRange.Validate if the designated constraints aren't met. +type DoubleRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go new file mode 100644 index 000000000..7309b8c14 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go @@ -0,0 +1,200 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Int64Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DoubleRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.End)))) + i-- + dAtA[i] = 0x11 + } + if m.Start != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Start)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Int64Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n += len(m.unknownFields) + return n +} + +func (m *Int32Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *DoubleRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 9 + } + if m.End != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go new file mode 100644 index 000000000..e7663f294 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go @@ -0,0 +1,407 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Choose between allow all and deny all. +type RateLimitStrategy_BlanketRule int32 + +const ( + RateLimitStrategy_ALLOW_ALL RateLimitStrategy_BlanketRule = 0 + RateLimitStrategy_DENY_ALL RateLimitStrategy_BlanketRule = 1 +) + +// Enum value maps for RateLimitStrategy_BlanketRule. +var ( + RateLimitStrategy_BlanketRule_name = map[int32]string{ + 0: "ALLOW_ALL", + 1: "DENY_ALL", + } + RateLimitStrategy_BlanketRule_value = map[string]int32{ + "ALLOW_ALL": 0, + "DENY_ALL": 1, + } +) + +func (x RateLimitStrategy_BlanketRule) Enum() *RateLimitStrategy_BlanketRule { + p := new(RateLimitStrategy_BlanketRule) + *p = x + return p +} + +func (x RateLimitStrategy_BlanketRule) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitStrategy_BlanketRule) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0].Descriptor() +} + +func (RateLimitStrategy_BlanketRule) Type() protoreflect.EnumType { + return &file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0] +} + +func (x RateLimitStrategy_BlanketRule) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitStrategy_BlanketRule.Descriptor instead. 
+func (RateLimitStrategy_BlanketRule) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +type RateLimitStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Strategy: + // + // *RateLimitStrategy_BlanketRule_ + // *RateLimitStrategy_RequestsPerTimeUnit_ + // *RateLimitStrategy_TokenBucket + Strategy isRateLimitStrategy_Strategy `protobuf_oneof:"strategy"` +} + +func (x *RateLimitStrategy) Reset() { + *x = RateLimitStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy) ProtoMessage() {} + +func (x *RateLimitStrategy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy.ProtoReflect.Descriptor instead. +func (*RateLimitStrategy) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0} +} + +func (m *RateLimitStrategy) GetStrategy() isRateLimitStrategy_Strategy { + if m != nil { + return m.Strategy + } + return nil +} + +func (x *RateLimitStrategy) GetBlanketRule() RateLimitStrategy_BlanketRule { + if x, ok := x.GetStrategy().(*RateLimitStrategy_BlanketRule_); ok { + return x.BlanketRule + } + return RateLimitStrategy_ALLOW_ALL +} + +func (x *RateLimitStrategy) GetRequestsPerTimeUnit() *RateLimitStrategy_RequestsPerTimeUnit { + if x, ok := x.GetStrategy().(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + return x.RequestsPerTimeUnit + } + return nil +} + +func (x *RateLimitStrategy) GetTokenBucket() *TokenBucket { + if x, ok := x.GetStrategy().(*RateLimitStrategy_TokenBucket); ok { + return x.TokenBucket + } + return nil +} + +type isRateLimitStrategy_Strategy interface { + isRateLimitStrategy_Strategy() +} + +type RateLimitStrategy_BlanketRule_ struct { + // Allow or Deny the requests. + // If unset, allow all. + BlanketRule RateLimitStrategy_BlanketRule `protobuf:"varint,1,opt,name=blanket_rule,json=blanketRule,proto3,enum=envoy.type.v3.RateLimitStrategy_BlanketRule,oneof"` +} + +type RateLimitStrategy_RequestsPerTimeUnit_ struct { + // Best-effort limit of the number of requests per time unit, f.e. requests per second. + // Does not prescribe any specific rate limiting algorithm, see :ref:`RequestsPerTimeUnit + // ` for details. + RequestsPerTimeUnit *RateLimitStrategy_RequestsPerTimeUnit `protobuf:"bytes,2,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3,oneof"` +} + +type RateLimitStrategy_TokenBucket struct { + // Limit the requests by consuming tokens from the Token Bucket. + // Allow the same number of requests as the number of tokens available in + // the token bucket. 
+ TokenBucket *TokenBucket `protobuf:"bytes,3,opt,name=token_bucket,json=tokenBucket,proto3,oneof"` +} + +func (*RateLimitStrategy_BlanketRule_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_RequestsPerTimeUnit_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_TokenBucket) isRateLimitStrategy_Strategy() {} + +// Best-effort limit of the number of requests per time unit. +// +// Allows to specify the desired requests per second (RPS, QPS), requests per minute (QPM, RPM), +// etc., without specifying a rate limiting algorithm implementation. +// +// “RequestsPerTimeUnit“ strategy does not demand any specific rate limiting algorithm to be +// used (in contrast to the :ref:`TokenBucket `, +// for example). It implies that the implementation details of rate limiting algorithm are +// irrelevant as long as the configured number of "requests per time unit" is achieved. +// +// Note that the “TokenBucket“ is still a valid implementation of the “RequestsPerTimeUnit“ +// strategy, and may be chosen to enforce the rate limit. However, there's no guarantee it will be +// the “TokenBucket“ in particular, and not the Leaky Bucket, the Sliding Window, or any other +// rate limiting algorithm that fulfills the requirements. +type RateLimitStrategy_RequestsPerTimeUnit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The desired number of requests per :ref:`time_unit + // ` to allow. + // If set to “0“, deny all (equivalent to “BlanketRule.DENY_ALL“). + // + // .. note:: + // + // Note that the algorithm implementation determines the course of action for the requests + // over the limit. As long as the ``requests_per_time_unit`` converges on the desired value, + // it's allowed to treat this field as a soft-limit: allow bursts, redistribute the allowance + // over time, etc. + RequestsPerTimeUnit uint64 `protobuf:"varint,1,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3" json:"requests_per_time_unit,omitempty"` + // The unit of time. Ignored when :ref:`requests_per_time_unit + // ` + // is “0“ (deny all). + TimeUnit RateLimitUnit `protobuf:"varint,2,opt,name=time_unit,json=timeUnit,proto3,enum=envoy.type.v3.RateLimitUnit" json:"time_unit,omitempty"` +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) Reset() { + *x = RateLimitStrategy_RequestsPerTimeUnit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy_RequestsPerTimeUnit) ProtoMessage() {} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy_RequestsPerTimeUnit.ProtoReflect.Descriptor instead. 
+func (*RateLimitStrategy_RequestsPerTimeUnit) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetRequestsPerTimeUnit() uint64 { + if x != nil { + return x.RequestsPerTimeUnit + } + return 0 +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetTimeUnit() RateLimitUnit { + if x != nil { + return x.TimeUnit + } + return RateLimitUnit_UNKNOWN +} + +var File_envoy_type_v3_ratelimit_strategy_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, + 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x5b, 0x0a, 0x0c, + 0x62, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x2e, 0x42, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, + 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x6b, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, + 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6f, 0x6b, 
+ 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x8f, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, + 0x33, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x74, 0x12, 0x43, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x55, 0x6e, 0x69, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x22, 0x2a, 0x0a, 0x0b, 0x42, 0x6c, 0x61, + 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4e, 0x59, 0x5f, + 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x42, 0x0f, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce sync.Once + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = file_envoy_type_v3_ratelimit_strategy_proto_rawDesc +) + +func file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP() []byte { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_strategy_proto_rawDescData) + }) + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescData +} + +var file_envoy_type_v3_ratelimit_strategy_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_ratelimit_strategy_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_v3_ratelimit_strategy_proto_goTypes = []interface{}{ + (RateLimitStrategy_BlanketRule)(0), // 0: envoy.type.v3.RateLimitStrategy.BlanketRule + (*RateLimitStrategy)(nil), // 1: envoy.type.v3.RateLimitStrategy + (*RateLimitStrategy_RequestsPerTimeUnit)(nil), // 2: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + (*TokenBucket)(nil), // 3: envoy.type.v3.TokenBucket + (RateLimitUnit)(0), // 4: envoy.type.v3.RateLimitUnit +} +var file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = []int32{ + 0, // 
0: envoy.type.v3.RateLimitStrategy.blanket_rule:type_name -> envoy.type.v3.RateLimitStrategy.BlanketRule + 2, // 1: envoy.type.v3.RateLimitStrategy.requests_per_time_unit:type_name -> envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + 3, // 2: envoy.type.v3.RateLimitStrategy.token_bucket:type_name -> envoy.type.v3.TokenBucket + 4, // 3: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit.time_unit:type_name -> envoy.type.v3.RateLimitUnit + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_ratelimit_strategy_proto_init() } +func file_envoy_type_v3_ratelimit_strategy_proto_init() { + if File_envoy_type_v3_ratelimit_strategy_proto != nil { + return + } + file_envoy_type_v3_ratelimit_unit_proto_init() + file_envoy_type_v3_token_bucket_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy_RequestsPerTimeUnit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RateLimitStrategy_BlanketRule_)(nil), + (*RateLimitStrategy_RequestsPerTimeUnit_)(nil), + (*RateLimitStrategy_TokenBucket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_ratelimit_strategy_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_ratelimit_strategy_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_ratelimit_strategy_proto_depIdxs, + EnumInfos: file_envoy_type_v3_ratelimit_strategy_proto_enumTypes, + MessageInfos: file_envoy_type_v3_ratelimit_strategy_proto_msgTypes, + }.Build() + File_envoy_type_v3_ratelimit_strategy_proto = out.File + file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = nil + file_envoy_type_v3_ratelimit_strategy_proto_goTypes = nil + file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go new file mode 100644 index 000000000..eebce17ea --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go @@ -0,0 +1,381 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RateLimitStrategy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimitStrategy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimitStrategyMultiError, or nil if none found. +func (m *RateLimitStrategy) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofStrategyPresent := false + switch v := m.Strategy.(type) { + case *RateLimitStrategy_BlanketRule_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if _, ok := RateLimitStrategy_BlanketRule_name[int32(m.GetBlanketRule())]; !ok { + err := RateLimitStrategyValidationError{ + field: "BlanketRule", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RateLimitStrategy_RequestsPerTimeUnit_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetRequestsPerTimeUnit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestsPerTimeUnit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimitStrategy_TokenBucket: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetTokenBucket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + 
field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTokenBucket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofStrategyPresent { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategyMultiError(errors) + } + + return nil +} + +// RateLimitStrategyMultiError is an error wrapping multiple validation errors +// returned by RateLimitStrategy.ValidateAll() if the designated constraints +// aren't met. +type RateLimitStrategyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitStrategyMultiError) AllErrors() []error { return m } + +// RateLimitStrategyValidationError is the validation error returned by +// RateLimitStrategy.Validate if the designated constraints aren't met. +type RateLimitStrategyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitStrategyValidationError) ErrorName() string { + return "RateLimitStrategyValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategyValidationError{} + +// Validate checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RateLimitStrategy_RequestsPerTimeUnit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. 
If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimitStrategy_RequestsPerTimeUnitMultiError, or nil if none found. +func (m *RateLimitStrategy_RequestsPerTimeUnit) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RequestsPerTimeUnit + + if _, ok := RateLimitUnit_name[int32(m.GetTimeUnit())]; !ok { + err := RateLimitStrategy_RequestsPerTimeUnitValidationError{ + field: "TimeUnit", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategy_RequestsPerTimeUnitMultiError(errors) + } + + return nil +} + +// RateLimitStrategy_RequestsPerTimeUnitMultiError is an error wrapping +// multiple validation errors returned by +// RateLimitStrategy_RequestsPerTimeUnit.ValidateAll() if the designated +// constraints aren't met. +type RateLimitStrategy_RequestsPerTimeUnitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) AllErrors() []error { return m } + +// RateLimitStrategy_RequestsPerTimeUnitValidationError is the validation error +// returned by RateLimitStrategy_RequestsPerTimeUnit.Validate if the +// designated constraints aren't met. +type RateLimitStrategy_RequestsPerTimeUnitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) ErrorName() string { + return "RateLimitStrategy_RequestsPerTimeUnitValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy_RequestsPerTimeUnit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategy_RequestsPerTimeUnitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategy_RequestsPerTimeUnitValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go new file mode 100644 index 000000000..c35990b76 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go @@ -0,0 +1,241 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TimeUnit)) + i-- + dAtA[i] = 0x10 + } + if m.RequestsPerTimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestsPerTimeUnit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Strategy.(*RateLimitStrategy_TokenBucket); ok { + size, 
err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_BlanketRule_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BlanketRule)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestsPerTimeUnit != nil { + size, err := m.RequestsPerTimeUnit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TokenBucket != nil { + size, err := m.TokenBucket.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestsPerTimeUnit)) + } + if m.TimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TimeUnit)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Strategy.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy_BlanketRule_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.BlanketRule)) + return n +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != nil { + l = m.RequestsPerTimeUnit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimitStrategy_TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TokenBucket != nil { + l = m.TokenBucket.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go new file mode 100644 index 000000000..368688888 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/ratelimit_unit.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Identifies the unit of of time for rate limit. +type RateLimitUnit int32 + +const ( + // The time unit is not known. + RateLimitUnit_UNKNOWN RateLimitUnit = 0 + // The time unit representing a second. + RateLimitUnit_SECOND RateLimitUnit = 1 + // The time unit representing a minute. + RateLimitUnit_MINUTE RateLimitUnit = 2 + // The time unit representing an hour. + RateLimitUnit_HOUR RateLimitUnit = 3 + // The time unit representing a day. + RateLimitUnit_DAY RateLimitUnit = 4 + // The time unit representing a month. + RateLimitUnit_MONTH RateLimitUnit = 5 + // The time unit representing a year. + RateLimitUnit_YEAR RateLimitUnit = 6 +) + +// Enum value maps for RateLimitUnit. +var ( + RateLimitUnit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SECOND", + 2: "MINUTE", + 3: "HOUR", + 4: "DAY", + 5: "MONTH", + 6: "YEAR", + } + RateLimitUnit_value = map[string]int32{ + "UNKNOWN": 0, + "SECOND": 1, + "MINUTE": 2, + "HOUR": 3, + "DAY": 4, + "MONTH": 5, + "YEAR": 6, + } +) + +func (x RateLimitUnit) Enum() *RateLimitUnit { + p := new(RateLimitUnit) + *p = x + return p +} + +func (x RateLimitUnit) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitUnit) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0].Descriptor() +} + +func (RateLimitUnit) Type() protoreflect.EnumType { + return &file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0] +} + +func (x RateLimitUnit) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitUnit.Descriptor instead. 
+func (RateLimitUnit) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_type_v3_ratelimit_unit_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_ratelimit_unit_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2a, 0x5c, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x55, + 0x6e, 0x69, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, + 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, + 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, + 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce sync.Once + file_envoy_type_v3_ratelimit_unit_proto_rawDescData = file_envoy_type_v3_ratelimit_unit_proto_rawDesc +) + +func file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP() []byte { + file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_ratelimit_unit_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_unit_proto_rawDescData) + }) + return file_envoy_type_v3_ratelimit_unit_proto_rawDescData +} + +var file_envoy_type_v3_ratelimit_unit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_ratelimit_unit_proto_goTypes = []interface{}{ + (RateLimitUnit)(0), // 0: envoy.type.v3.RateLimitUnit +} +var file_envoy_type_v3_ratelimit_unit_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_ratelimit_unit_proto_init() } +func file_envoy_type_v3_ratelimit_unit_proto_init() { + if File_envoy_type_v3_ratelimit_unit_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_envoy_type_v3_ratelimit_unit_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_ratelimit_unit_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_ratelimit_unit_proto_depIdxs, + EnumInfos: file_envoy_type_v3_ratelimit_unit_proto_enumTypes, + }.Build() + File_envoy_type_v3_ratelimit_unit_proto = out.File + file_envoy_type_v3_ratelimit_unit_proto_rawDesc = nil + file_envoy_type_v3_ratelimit_unit_proto_goTypes = nil + file_envoy_type_v3_ratelimit_unit_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go new file mode 100644 index 000000000..17658400e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_unit.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go new file mode 100644 index 000000000..630e6567c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate +// expected behaviors and APIs, the patch version field is used only +// for security fixes and can be generally ignored. 
+type SemanticVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MajorNumber uint32 `protobuf:"varint,1,opt,name=major_number,json=majorNumber,proto3" json:"major_number,omitempty"` + MinorNumber uint32 `protobuf:"varint,2,opt,name=minor_number,json=minorNumber,proto3" json:"minor_number,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` +} + +func (x *SemanticVersion) Reset() { + *x = SemanticVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SemanticVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SemanticVersion) ProtoMessage() {} + +func (x *SemanticVersion) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SemanticVersion.ProtoReflect.Descriptor instead. +func (*SemanticVersion) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_semantic_version_proto_rawDescGZIP(), []int{0} +} + +func (x *SemanticVersion) GetMajorNumber() uint32 { + if x != nil { + return x.MajorNumber + } + return 0 +} + +func (x *SemanticVersion) GetMinorNumber() uint32 { + if x != nil { + return x.MinorNumber + } + return 0 +} + +func (x *SemanticVersion) GetPatch() uint32 { + if x != nil { + return x.Patch + } + return 0 +} + +var File_envoy_type_v3_semantic_version_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_semantic_version_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21, + 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7a, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 
0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x14, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_semantic_version_proto_rawDescOnce sync.Once + file_envoy_type_v3_semantic_version_proto_rawDescData = file_envoy_type_v3_semantic_version_proto_rawDesc +) + +func file_envoy_type_v3_semantic_version_proto_rawDescGZIP() []byte { + file_envoy_type_v3_semantic_version_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_semantic_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_semantic_version_proto_rawDescData) + }) + return file_envoy_type_v3_semantic_version_proto_rawDescData +} + +var file_envoy_type_v3_semantic_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_semantic_version_proto_goTypes = []interface{}{ + (*SemanticVersion)(nil), // 0: envoy.type.v3.SemanticVersion +} +var file_envoy_type_v3_semantic_version_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_semantic_version_proto_init() } +func file_envoy_type_v3_semantic_version_proto_init() { + if File_envoy_type_v3_semantic_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_semantic_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SemanticVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_semantic_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_semantic_version_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_semantic_version_proto_depIdxs, + MessageInfos: file_envoy_type_v3_semantic_version_proto_msgTypes, + }.Build() + File_envoy_type_v3_semantic_version_proto = out.File + file_envoy_type_v3_semantic_version_proto_rawDesc = nil + file_envoy_type_v3_semantic_version_proto_goTypes = nil + file_envoy_type_v3_semantic_version_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go new file mode 100644 index 000000000..af3b6ee41 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go @@ -0,0 +1,143 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SemanticVersion with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SemanticVersion) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SemanticVersion with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SemanticVersionMultiError, or nil if none found. +func (m *SemanticVersion) ValidateAll() error { + return m.validate(true) +} + +func (m *SemanticVersion) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MajorNumber + + // no validation rules for MinorNumber + + // no validation rules for Patch + + if len(errors) > 0 { + return SemanticVersionMultiError(errors) + } + + return nil +} + +// SemanticVersionMultiError is an error wrapping multiple validation errors +// returned by SemanticVersion.ValidateAll() if the designated constraints +// aren't met. +type SemanticVersionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SemanticVersionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SemanticVersionMultiError) AllErrors() []error { return m } + +// SemanticVersionValidationError is the validation error returned by +// SemanticVersion.Validate if the designated constraints aren't met. +type SemanticVersionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SemanticVersionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SemanticVersionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SemanticVersionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SemanticVersionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SemanticVersionValidationError) ErrorName() string { return "SemanticVersionValidationError" } + +// Error satisfies the builtin error interface +func (e SemanticVersionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSemanticVersion.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SemanticVersionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SemanticVersionValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go new file mode 100644 index 000000000..13810fe82 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SemanticVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemanticVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SemanticVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Patch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Patch)) + i-- + dAtA[i] = 0x18 + } + if m.MinorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinorNumber)) + i-- + dAtA[i] = 0x10 + } + if m.MajorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MajorNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SemanticVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MajorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MajorNumber)) + } + if m.MinorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinorNumber)) + } + if m.Patch != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Patch)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go new file mode 100644 index 000000000..9c21f2454 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configures a token bucket, typically used for rate limiting. +type TokenBucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket + // initially contains. + MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` + // The number of tokens added to the bucket during each fill interval. If not specified, defaults + // to a single token. + TokensPerFill *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` + // The fill interval that tokens are added to the bucket. During each fill interval + // “tokens_per_fill“ are added to the bucket. The bucket will never contain more than + // “max_tokens“ tokens. + FillInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` +} + +func (x *TokenBucket) Reset() { + *x = TokenBucket{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TokenBucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TokenBucket) ProtoMessage() {} + +func (x *TokenBucket) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TokenBucket.ProtoReflect.Descriptor instead. 
+func (*TokenBucket) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_token_bucket_proto_rawDescGZIP(), []int{0} +} + +func (x *TokenBucket) GetMaxTokens() uint32 { + if x != nil { + return x.MaxTokens + } + return 0 +} + +func (x *TokenBucket) GetTokensPerFill() *wrapperspb.UInt32Value { + if x != nil { + return x.TokensPerFill + } + return nil +} + +func (x *TokenBucket) GetFillInterval() *durationpb.Duration { + if x != nil { + return x.FillInterval + } + return nil +} + +var File_envoy_type_v3_token_bucket_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_token_bucket_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x01, 0x0a, + 0x0b, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0a, + 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x50, 0x65, 0x72, 0x46, + 0x69, 0x6c, 0x6c, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, + 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, + 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x76, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_token_bucket_proto_rawDescOnce sync.Once + file_envoy_type_v3_token_bucket_proto_rawDescData = file_envoy_type_v3_token_bucket_proto_rawDesc +) + +func file_envoy_type_v3_token_bucket_proto_rawDescGZIP() []byte { + file_envoy_type_v3_token_bucket_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_token_bucket_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_token_bucket_proto_rawDescData) + }) + return file_envoy_type_v3_token_bucket_proto_rawDescData +} + +var file_envoy_type_v3_token_bucket_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_token_bucket_proto_goTypes = []interface{}{ + (*TokenBucket)(nil), // 0: envoy.type.v3.TokenBucket + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration +} +var file_envoy_type_v3_token_bucket_proto_depIdxs = []int32{ + 1, // 0: envoy.type.v3.TokenBucket.tokens_per_fill:type_name -> google.protobuf.UInt32Value + 2, // 1: envoy.type.v3.TokenBucket.fill_interval:type_name -> google.protobuf.Duration + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_token_bucket_proto_init() } +func file_envoy_type_v3_token_bucket_proto_init() { + if File_envoy_type_v3_token_bucket_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_token_bucket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TokenBucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_token_bucket_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_token_bucket_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_token_bucket_proto_depIdxs, + MessageInfos: file_envoy_type_v3_token_bucket_proto_msgTypes, + }.Build() + File_envoy_type_v3_token_bucket_proto = out.File + file_envoy_type_v3_token_bucket_proto_rawDesc = nil + file_envoy_type_v3_token_bucket_proto_goTypes = nil + file_envoy_type_v3_token_bucket_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go new file mode 100644 index 000000000..4f2607621 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go @@ -0,0 +1,203 @@ +//go:build !disable_pgv +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TokenBucket with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TokenBucket) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TokenBucket with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TokenBucketMultiError, or +// nil if none found. +func (m *TokenBucket) ValidateAll() error { + return m.validate(true) +} + +func (m *TokenBucket) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMaxTokens() <= 0 { + err := TokenBucketValidationError{ + field: "MaxTokens", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetTokensPerFill(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := TokenBucketValidationError{ + field: "TokensPerFill", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if m.GetFillInterval() == nil { + err := TokenBucketValidationError{ + field: "FillInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetFillInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = TokenBucketValidationError{ + field: "FillInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := TokenBucketValidationError{ + field: "FillInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return TokenBucketMultiError(errors) + } + + return nil +} + +// TokenBucketMultiError is an error wrapping multiple validation errors +// returned by TokenBucket.ValidateAll() if the designated constraints aren't met. +type TokenBucketMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TokenBucketMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TokenBucketMultiError) AllErrors() []error { return m } + +// TokenBucketValidationError is the validation error returned by +// TokenBucket.Validate if the designated constraints aren't met. +type TokenBucketValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
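A caller-side illustration (again an aside, not generated code): Validate reports only the first violated rule, while ValidateAll gathers every violation into a TokenBucketMultiError. The deliberately invalid message below is an assumed example input.

// Hypothetical usage sketch; the invalid message is an assumption for demonstration.
package main

import (
	"fmt"

	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
)

func main() {
	tb := &typev3.TokenBucket{} // max_tokens is 0 and fill_interval is unset

	// Validate returns only the first violated rule.
	fmt.Println(tb.Validate())

	// ValidateAll collects every violation into a TokenBucketMultiError.
	if err := tb.ValidateAll(); err != nil {
		if multi, ok := err.(typev3.TokenBucketMultiError); ok {
			for _, verr := range multi.AllErrors() {
				fmt.Println(verr)
			}
		}
	}
}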
+func (e TokenBucketValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TokenBucketValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TokenBucketValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TokenBucketValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TokenBucketValidationError) ErrorName() string { return "TokenBucketValidationError" } + +// Error satisfies the builtin error interface +func (e TokenBucketValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTokenBucket.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TokenBucketValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TokenBucketValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go new file mode 100644 index 000000000..8ab53eaf2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go @@ -0,0 +1,100 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TokenBucket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillInterval != nil { + size, err := (*durationpb.Duration)(m.FillInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.TokensPerFill != nil { + size, err := (*wrapperspb.UInt32Value)(m.TokensPerFill).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTokens)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTokens)) + } + if m.TokensPerFill != nil { + l = (*wrapperspb.UInt32Value)(m.TokensPerFill).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillInterval != nil { + l = (*durationpb.Duration)(m.FillInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
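Returning to the token_bucket_vtproto.pb.go file added earlier in this change: when built with the vtprotobuf tag, the generated MarshalVTStrict and SizeVT methods provide a marshaling path that does not rely on reflection. The snippet below is an assumed usage sketch, not code from this diff.

//go:build vtprotobuf

// Hypothetical usage sketch for the vtprotobuf fast path; values are assumptions.
package main

import (
	"fmt"
	"time"

	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	tb := &typev3.TokenBucket{MaxTokens: 100, FillInterval: durationpb.New(time.Second)}

	// SizeVT pre-computes the wire size; MarshalVTStrict serializes the message
	// with generated code instead of reflection.
	buf, err := tb.MarshalVTStrict()
	if err != nil {
		panic(err)
	}
	fmt.Println(tb.SizeVT(), len(buf))
}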
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD new file mode 100644 index 000000000..a9d38c51b --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD @@ -0,0 +1,74 @@ +load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_java//java:defs.bzl", "java_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = + ["//visibility:public"], +) + +proto_library( + name = "validate_proto", + srcs = ["validate.proto"], + deps = [ + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +cc_proto_library( + name = "validate_cc", + deps = [":validate_proto"], +) + +py_proto_library( + name = "validate_py", + srcs = ["validate.proto"], + deps = ["@com_google_protobuf//:protobuf_python"], +) + +go_proto_library( + name = "validate_go_proto", + importpath = "github.com/envoyproxy/protoc-gen-validate/validate", + proto = ":validate_proto", +) + +cc_library( + name = "cc_validate", + hdrs = ["validate.h"], +) + +go_library( + name = "validate_go", + embed = [":validate_go_proto"], + importpath = "github.com/envoyproxy/protoc-gen-validate/validate", +) + +java_proto_library( + name = "validate_java", + deps = [":validate_proto"], +) + +filegroup( + name = "validate_src", + srcs = ["validate.proto"], +) + +alias( + name = "go_default_library", + actual = ":validate", + deprecation = "Use :validate instead of :go_default_library. Details about the new naming convention: https://github.com/bazelbuild/bazel-gazelle/pull/863", + visibility = ["//visibility:public"], +) + +# this alias allows build files generated with Gazelle in other repositories +# to find validate as an external dependency +alias( + name = "validate", + actual = ":validate_go", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h new file mode 100644 index 000000000..d6cf6c9d9 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h @@ -0,0 +1,183 @@ +#ifndef _VALIDATE_H +#define _VALIDATE_H + +#include <functional> +#include <iostream> +#include <regex> +#include <stdexcept> +#include <string> +#include <typeindex> +#include <unordered_map> + +#if !defined(_WIN32) +#include <arpa/inet.h> +#else +#include <winsock2.h> +#include <ws2tcpip.h> + +// <winsock2.h> uses macros to #define a ton of symbols, +// many of which interfere with our code here and down +// the line in various extensions. +#undef DELETE +#undef ERROR +#undef GetMessage +#undef interface +#undef TRUE +#undef min + +#endif + +#include "google/protobuf/message.h" + +namespace pgv { +using std::string; + +class UnimplementedException : public std::runtime_error { +public: + UnimplementedException() : std::runtime_error("not yet implemented") {} + UnimplementedException(const std::string& message) : std::runtime_error(message) {} + // Thrown by C++ validation code that is not yet implemented. +}; + +using ValidationMsg = std::string; + +class BaseValidator { +public: + /** + * Validate/check a generic message object with a registered validator for the concrete message + * type. + * @param m supplies the message to check. + * @param err supplies the place to return error information.
+ * @return true if the validation passes OR there is no registered validator for the concrete + * message type. false is returned if validation explicitly fails. + */ + static bool AbstractCheckMessage(const google::protobuf::Message& m, ValidationMsg* err) { + // Polymorphic lookup is used to see if there is a matching concrete validator. If so, call it. + // Otherwise return success. + auto it = abstractValidators().find(std::type_index(typeid(m))); + if (it == abstractValidators().end()) { + return true; + } + return it->second(m, err); + } + +protected: + // Used to implement AbstractCheckMessage() above. Every message that is linked into the binary + // will register itself by type_index, allowing for polymorphic lookup later. + static std::unordered_map<std::type_index, std::function<bool(const google::protobuf::Message&, ValidationMsg*)>>& + abstractValidators() { + static auto* validator_map = new std::unordered_map< + std::type_index, std::function<bool(const google::protobuf::Message&, ValidationMsg*)>>(); + return *validator_map; + } +}; + +template <typename T> class Validator : public BaseValidator { +public: + Validator(std::function<bool(const T&, ValidationMsg*)> check) : check_(check) { + abstractValidators()[std::type_index(typeid(T))] = [this](const google::protobuf::Message& m, + ValidationMsg* err) -> bool { + return check_(dynamic_cast<const T&>(m), err); + }; + } + +private: + std::function<bool(const T&, ValidationMsg*)> check_; +}; + +static inline std::string String(const ValidationMsg& msg) { return std::string(msg); } + +static inline bool IsPrefix(const string& maybe_prefix, const string& search_in) { + return search_in.compare(0, maybe_prefix.size(), maybe_prefix) == 0; +} + +static inline bool IsSuffix(const string& maybe_suffix, const string& search_in) { + return maybe_suffix.size() <= search_in.size() && + search_in.compare(search_in.size() - maybe_suffix.size(), maybe_suffix.size(), + maybe_suffix) == 0; +} + +static inline bool Contains(const string& search_in, const string& to_find) { + return search_in.find(to_find) != string::npos; +} + +static inline bool NotContains(const string& search_in, const string& to_find) { + return !Contains(search_in, to_find); +} + +static inline bool IsIpv4(const string& to_validate) { + struct sockaddr_in sa; + return !(inet_pton(AF_INET, to_validate.c_str(), &sa.sin_addr) < 1); +} + +static inline bool IsIpv6(const string& to_validate) { + struct sockaddr_in6 sa_six; + return !(inet_pton(AF_INET6, to_validate.c_str(), &sa_six.sin6_addr) < 1); +} + +static inline bool IsIp(const string& to_validate) { + return IsIpv4(to_validate) || IsIpv6(to_validate); +} + +static inline bool IsHostname(const string& to_validate) { + if (to_validate.length() > 253) { + return false; + } + + const std::regex dot_regex{"\\."}; + const auto iter_end = std::sregex_token_iterator(); + auto iter = std::sregex_token_iterator(to_validate.begin(), to_validate.end(), dot_regex, -1); + for (; iter != iter_end; ++iter) { + const std::string& part = *iter; + if (part.empty() || part.length() > 63) { + return false; + } + if (part.at(0) == '-') { + return false; + } + if (part.at(part.length() - 1) == '-') { + return false; + } + for (const auto& character : part) { + if ((character < 'A' || character > 'Z') && (character < 'a' || character > 'z') && + (character < '0' || character > '9') && character != '-') { + return false; + } + } + } + + return true; +} + +namespace { + +inline int OneCharLen(const char* src) { + return "\1\1\1\1\1\1\1\1\1\1\1\1\2\2\3\4"[(*src & 0xFF) >> 4]; +} + +inline int UTF8FirstLetterNumBytes(const char *utf8_str, int str_len) { + if (str_len == 0) + return 0; + return OneCharLen(utf8_str); +} + +inline size_t Utf8Len(const string&
narrow_string) { + const char* str_char = narrow_string.c_str(); + ptrdiff_t byte_len = narrow_string.length(); + size_t unicode_len = 0; + int char_len = 1; + while (byte_len > 0 && char_len > 0) { + char_len = UTF8FirstLetterNumBytes(str_char, byte_len); + str_char += char_len; + byte_len -= char_len; + ++unicode_len; + } + return unicode_len; +} + +} // namespace + +} // namespace pgv + +#endif // _VALIDATE_H diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go new file mode 100644 index 000000000..6df95e89e --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go @@ -0,0 +1,4106 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: validate/validate.proto + +package validate + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// WellKnownRegex contain some well-known patterns. +type KnownRegex int32 + +const ( + KnownRegex_UNKNOWN KnownRegex = 0 + // HTTP header name as defined by RFC 7230. + KnownRegex_HTTP_HEADER_NAME KnownRegex = 1 + // HTTP header value as defined by RFC 7230. + KnownRegex_HTTP_HEADER_VALUE KnownRegex = 2 +) + +// Enum value maps for KnownRegex. +var ( + KnownRegex_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP_HEADER_NAME", + 2: "HTTP_HEADER_VALUE", + } + KnownRegex_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP_HEADER_NAME": 1, + "HTTP_HEADER_VALUE": 2, + } +) + +func (x KnownRegex) Enum() *KnownRegex { + p := new(KnownRegex) + *p = x + return p +} + +func (x KnownRegex) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KnownRegex) Descriptor() protoreflect.EnumDescriptor { + return file_validate_validate_proto_enumTypes[0].Descriptor() +} + +func (KnownRegex) Type() protoreflect.EnumType { + return &file_validate_validate_proto_enumTypes[0] +} + +func (x KnownRegex) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *KnownRegex) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = KnownRegex(num) + return nil +} + +// Deprecated: Use KnownRegex.Descriptor instead. +func (KnownRegex) EnumDescriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{0} +} + +// FieldRules encapsulates the rules for each type of field. Depending on the +// field, the correct set should be used to ensure proper validations. 
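As a rough illustration of how such per-field constraints look in Go (an assumed example, not taken from this diff), the {gt: 0} rule that token_bucket.pb.validate.go enforces on max_tokens corresponds to a FieldRules value along these lines:

// Hypothetical sketch of a uint32 {gt: 0} constraint expressed as validate.FieldRules.
// Assumed example, not vendored code.
package main

import (
	"fmt"

	"github.com/envoyproxy/protoc-gen-validate/validate"
	"google.golang.org/protobuf/proto"
)

func main() {
	rules := &validate.FieldRules{
		Type: &validate.FieldRules_Uint32{
			Uint32: &validate.UInt32Rules{Gt: proto.Uint32(0)},
		},
	}
	fmt.Println(rules.GetUint32().GetGt()) // prints 0: the field must be strictly greater than 0
}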
+type FieldRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message *MessageRules `protobuf:"bytes,17,opt,name=message" json:"message,omitempty"` + // Types that are assignable to Type: + // + // *FieldRules_Float + // *FieldRules_Double + // *FieldRules_Int32 + // *FieldRules_Int64 + // *FieldRules_Uint32 + // *FieldRules_Uint64 + // *FieldRules_Sint32 + // *FieldRules_Sint64 + // *FieldRules_Fixed32 + // *FieldRules_Fixed64 + // *FieldRules_Sfixed32 + // *FieldRules_Sfixed64 + // *FieldRules_Bool + // *FieldRules_String_ + // *FieldRules_Bytes + // *FieldRules_Enum + // *FieldRules_Repeated + // *FieldRules_Map + // *FieldRules_Any + // *FieldRules_Duration + // *FieldRules_Timestamp + Type isFieldRules_Type `protobuf_oneof:"type"` +} + +func (x *FieldRules) Reset() { + *x = FieldRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldRules) ProtoMessage() {} + +func (x *FieldRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldRules.ProtoReflect.Descriptor instead. +func (*FieldRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldRules) GetMessage() *MessageRules { + if x != nil { + return x.Message + } + return nil +} + +func (m *FieldRules) GetType() isFieldRules_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *FieldRules) GetFloat() *FloatRules { + if x, ok := x.GetType().(*FieldRules_Float); ok { + return x.Float + } + return nil +} + +func (x *FieldRules) GetDouble() *DoubleRules { + if x, ok := x.GetType().(*FieldRules_Double); ok { + return x.Double + } + return nil +} + +func (x *FieldRules) GetInt32() *Int32Rules { + if x, ok := x.GetType().(*FieldRules_Int32); ok { + return x.Int32 + } + return nil +} + +func (x *FieldRules) GetInt64() *Int64Rules { + if x, ok := x.GetType().(*FieldRules_Int64); ok { + return x.Int64 + } + return nil +} + +func (x *FieldRules) GetUint32() *UInt32Rules { + if x, ok := x.GetType().(*FieldRules_Uint32); ok { + return x.Uint32 + } + return nil +} + +func (x *FieldRules) GetUint64() *UInt64Rules { + if x, ok := x.GetType().(*FieldRules_Uint64); ok { + return x.Uint64 + } + return nil +} + +func (x *FieldRules) GetSint32() *SInt32Rules { + if x, ok := x.GetType().(*FieldRules_Sint32); ok { + return x.Sint32 + } + return nil +} + +func (x *FieldRules) GetSint64() *SInt64Rules { + if x, ok := x.GetType().(*FieldRules_Sint64); ok { + return x.Sint64 + } + return nil +} + +func (x *FieldRules) GetFixed32() *Fixed32Rules { + if x, ok := x.GetType().(*FieldRules_Fixed32); ok { + return x.Fixed32 + } + return nil +} + +func (x *FieldRules) GetFixed64() *Fixed64Rules { + if x, ok := x.GetType().(*FieldRules_Fixed64); ok { + return x.Fixed64 + } + return nil +} + +func (x *FieldRules) GetSfixed32() *SFixed32Rules { + if x, ok := x.GetType().(*FieldRules_Sfixed32); ok { + return x.Sfixed32 + } + return nil +} + +func (x *FieldRules) GetSfixed64() *SFixed64Rules { + if x, ok := 
x.GetType().(*FieldRules_Sfixed64); ok { + return x.Sfixed64 + } + return nil +} + +func (x *FieldRules) GetBool() *BoolRules { + if x, ok := x.GetType().(*FieldRules_Bool); ok { + return x.Bool + } + return nil +} + +func (x *FieldRules) GetString_() *StringRules { + if x, ok := x.GetType().(*FieldRules_String_); ok { + return x.String_ + } + return nil +} + +func (x *FieldRules) GetBytes() *BytesRules { + if x, ok := x.GetType().(*FieldRules_Bytes); ok { + return x.Bytes + } + return nil +} + +func (x *FieldRules) GetEnum() *EnumRules { + if x, ok := x.GetType().(*FieldRules_Enum); ok { + return x.Enum + } + return nil +} + +func (x *FieldRules) GetRepeated() *RepeatedRules { + if x, ok := x.GetType().(*FieldRules_Repeated); ok { + return x.Repeated + } + return nil +} + +func (x *FieldRules) GetMap() *MapRules { + if x, ok := x.GetType().(*FieldRules_Map); ok { + return x.Map + } + return nil +} + +func (x *FieldRules) GetAny() *AnyRules { + if x, ok := x.GetType().(*FieldRules_Any); ok { + return x.Any + } + return nil +} + +func (x *FieldRules) GetDuration() *DurationRules { + if x, ok := x.GetType().(*FieldRules_Duration); ok { + return x.Duration + } + return nil +} + +func (x *FieldRules) GetTimestamp() *TimestampRules { + if x, ok := x.GetType().(*FieldRules_Timestamp); ok { + return x.Timestamp + } + return nil +} + +type isFieldRules_Type interface { + isFieldRules_Type() +} + +type FieldRules_Float struct { + // Scalar Field Types + Float *FloatRules `protobuf:"bytes,1,opt,name=float,oneof"` +} + +type FieldRules_Double struct { + Double *DoubleRules `protobuf:"bytes,2,opt,name=double,oneof"` +} + +type FieldRules_Int32 struct { + Int32 *Int32Rules `protobuf:"bytes,3,opt,name=int32,oneof"` +} + +type FieldRules_Int64 struct { + Int64 *Int64Rules `protobuf:"bytes,4,opt,name=int64,oneof"` +} + +type FieldRules_Uint32 struct { + Uint32 *UInt32Rules `protobuf:"bytes,5,opt,name=uint32,oneof"` +} + +type FieldRules_Uint64 struct { + Uint64 *UInt64Rules `protobuf:"bytes,6,opt,name=uint64,oneof"` +} + +type FieldRules_Sint32 struct { + Sint32 *SInt32Rules `protobuf:"bytes,7,opt,name=sint32,oneof"` +} + +type FieldRules_Sint64 struct { + Sint64 *SInt64Rules `protobuf:"bytes,8,opt,name=sint64,oneof"` +} + +type FieldRules_Fixed32 struct { + Fixed32 *Fixed32Rules `protobuf:"bytes,9,opt,name=fixed32,oneof"` +} + +type FieldRules_Fixed64 struct { + Fixed64 *Fixed64Rules `protobuf:"bytes,10,opt,name=fixed64,oneof"` +} + +type FieldRules_Sfixed32 struct { + Sfixed32 *SFixed32Rules `protobuf:"bytes,11,opt,name=sfixed32,oneof"` +} + +type FieldRules_Sfixed64 struct { + Sfixed64 *SFixed64Rules `protobuf:"bytes,12,opt,name=sfixed64,oneof"` +} + +type FieldRules_Bool struct { + Bool *BoolRules `protobuf:"bytes,13,opt,name=bool,oneof"` +} + +type FieldRules_String_ struct { + String_ *StringRules `protobuf:"bytes,14,opt,name=string,oneof"` +} + +type FieldRules_Bytes struct { + Bytes *BytesRules `protobuf:"bytes,15,opt,name=bytes,oneof"` +} + +type FieldRules_Enum struct { + // Complex Field Types + Enum *EnumRules `protobuf:"bytes,16,opt,name=enum,oneof"` +} + +type FieldRules_Repeated struct { + Repeated *RepeatedRules `protobuf:"bytes,18,opt,name=repeated,oneof"` +} + +type FieldRules_Map struct { + Map *MapRules `protobuf:"bytes,19,opt,name=map,oneof"` +} + +type FieldRules_Any struct { + // Well-Known Field Types + Any *AnyRules `protobuf:"bytes,20,opt,name=any,oneof"` +} + +type FieldRules_Duration struct { + Duration *DurationRules `protobuf:"bytes,21,opt,name=duration,oneof"` +} + +type 
FieldRules_Timestamp struct { + Timestamp *TimestampRules `protobuf:"bytes,22,opt,name=timestamp,oneof"` +} + +func (*FieldRules_Float) isFieldRules_Type() {} + +func (*FieldRules_Double) isFieldRules_Type() {} + +func (*FieldRules_Int32) isFieldRules_Type() {} + +func (*FieldRules_Int64) isFieldRules_Type() {} + +func (*FieldRules_Uint32) isFieldRules_Type() {} + +func (*FieldRules_Uint64) isFieldRules_Type() {} + +func (*FieldRules_Sint32) isFieldRules_Type() {} + +func (*FieldRules_Sint64) isFieldRules_Type() {} + +func (*FieldRules_Fixed32) isFieldRules_Type() {} + +func (*FieldRules_Fixed64) isFieldRules_Type() {} + +func (*FieldRules_Sfixed32) isFieldRules_Type() {} + +func (*FieldRules_Sfixed64) isFieldRules_Type() {} + +func (*FieldRules_Bool) isFieldRules_Type() {} + +func (*FieldRules_String_) isFieldRules_Type() {} + +func (*FieldRules_Bytes) isFieldRules_Type() {} + +func (*FieldRules_Enum) isFieldRules_Type() {} + +func (*FieldRules_Repeated) isFieldRules_Type() {} + +func (*FieldRules_Map) isFieldRules_Type() {} + +func (*FieldRules_Any) isFieldRules_Type() {} + +func (*FieldRules_Duration) isFieldRules_Type() {} + +func (*FieldRules_Timestamp) isFieldRules_Type() {} + +// FloatRules describes the constraints applied to `float` values +type FloatRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *float32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *float32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *float32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *float32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *float32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []float32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []float32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *FloatRules) Reset() { + *x = FloatRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatRules) ProtoMessage() {} + +func (x *FloatRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatRules.ProtoReflect.Descriptor instead. +func (*FloatRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatRules) GetConst() float32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *FloatRules) GetLt() float32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *FloatRules) GetLte() float32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *FloatRules) GetGt() float32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *FloatRules) GetGte() float32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *FloatRules) GetIn() []float32 { + if x != nil { + return x.In + } + return nil +} + +func (x *FloatRules) GetNotIn() []float32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *FloatRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// DoubleRules describes the constraints applied to `double` values +type DoubleRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *float64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *float64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *float64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *float64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *float64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []float64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []float64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *DoubleRules) Reset() { + *x = DoubleRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRules) ProtoMessage() {} + +func (x *DoubleRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRules.ProtoReflect.Descriptor instead. +func (*DoubleRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRules) GetConst() float64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *DoubleRules) GetLt() float64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *DoubleRules) GetLte() float64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *DoubleRules) GetGt() float64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *DoubleRules) GetGte() float64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *DoubleRules) GetIn() []float64 { + if x != nil { + return x.In + } + return nil +} + +func (x *DoubleRules) GetNotIn() []float64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *DoubleRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Int32Rules describes the constraints applied to `int32` values +type Int32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Int32Rules) Reset() { + *x = Int32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Rules) ProtoMessage() {} + +func (x *Int32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Rules.ProtoReflect.Descriptor instead. +func (*Int32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{3} +} + +func (x *Int32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Int32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Int32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Int32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Int32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *Int32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Int64Rules describes the constraints applied to `int64` values +type Int64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Int64Rules) Reset() { + *x = Int64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Rules) ProtoMessage() {} + +func (x *Int64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Rules.ProtoReflect.Descriptor instead. +func (*Int64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{4} +} + +func (x *Int64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Int64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Int64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Int64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Int64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Int64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// UInt32Rules describes the constraints applied to `uint32` values +type UInt32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *UInt32Rules) Reset() { + *x = UInt32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Rules) ProtoMessage() {} + +func (x *UInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Rules.ProtoReflect.Descriptor instead. +func (*UInt32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt32Rules) GetLt() uint32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *UInt32Rules) GetLte() uint32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *UInt32Rules) GetGt() uint32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *UInt32Rules) GetGte() uint32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *UInt32Rules) GetIn() []uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// UInt64Rules describes the constraints applied to `uint64` values +type UInt64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *UInt64Rules) Reset() { + *x = UInt64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Rules) ProtoMessage() {} + +func (x *UInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Rules.ProtoReflect.Descriptor instead. +func (*UInt64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{6} +} + +func (x *UInt64Rules) GetConst() uint64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt64Rules) GetLt() uint64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *UInt64Rules) GetLte() uint64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *UInt64Rules) GetGt() uint64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *UInt64Rules) GetGte() uint64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *UInt64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SInt32Rules describes the constraints applied to `sint32` values +type SInt32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"zigzag32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"zigzag32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"zigzag32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"zigzag32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"zigzag32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"zigzag32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"zigzag32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SInt32Rules) Reset() { + *x = SInt32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt32Rules) ProtoMessage() {} + +func (x *SInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SInt32Rules.ProtoReflect.Descriptor instead. +func (*SInt32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{7} +} + +func (x *SInt32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SInt32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SInt32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SInt32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SInt32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SInt64Rules describes the constraints applied to `sint64` values +type SInt64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"zigzag64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"zigzag64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"zigzag64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"zigzag64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"zigzag64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"zigzag64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"zigzag64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SInt64Rules) Reset() { + *x = SInt64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt64Rules) ProtoMessage() {} + +func (x *SInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SInt64Rules.ProtoReflect.Descriptor instead. +func (*SInt64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{8} +} + +func (x *SInt64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SInt64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SInt64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SInt64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SInt64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Fixed32Rules describes the constraints applied to `fixed32` values +type Fixed32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Fixed32Rules) Reset() { + *x = Fixed32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Fixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed32Rules) ProtoMessage() {} + +func (x *Fixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Fixed32Rules.ProtoReflect.Descriptor instead. +func (*Fixed32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{9} +} + +func (x *Fixed32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed32Rules) GetLt() uint32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Fixed32Rules) GetLte() uint32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Fixed32Rules) GetGt() uint32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Fixed32Rules) GetGte() uint32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Fixed32Rules) GetIn() []uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Fixed64Rules describes the constraints applied to `fixed64` values +type Fixed64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Fixed64Rules) Reset() { + *x = Fixed64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Fixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed64Rules) ProtoMessage() {} + +func (x *Fixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Fixed64Rules.ProtoReflect.Descriptor instead. +func (*Fixed64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{10} +} + +func (x *Fixed64Rules) GetConst() uint64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed64Rules) GetLt() uint64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Fixed64Rules) GetLte() uint64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Fixed64Rules) GetGt() uint64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Fixed64Rules) GetGte() uint64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Fixed64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SFixed32Rules describes the constraints applied to `sfixed32` values +type SFixed32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SFixed32Rules) Reset() { + *x = SFixed32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SFixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed32Rules) ProtoMessage() {} + +func (x *SFixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SFixed32Rules.ProtoReflect.Descriptor instead. +func (*SFixed32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{11} +} + +func (x *SFixed32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SFixed32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SFixed32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SFixed32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SFixed32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SFixed64Rules describes the constraints applied to `sfixed64` values +type SFixed64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SFixed64Rules) Reset() { + *x = SFixed64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SFixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed64Rules) ProtoMessage() {} + +func (x *SFixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SFixed64Rules.ProtoReflect.Descriptor instead. +func (*SFixed64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{12} +} + +func (x *SFixed64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SFixed64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SFixed64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SFixed64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SFixed64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// BoolRules describes the constraints applied to `bool` values +type BoolRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *bool `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` +} + +func (x *BoolRules) Reset() { + *x = BoolRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoolRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolRules) ProtoMessage() {} + +func (x *BoolRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolRules.ProtoReflect.Descriptor instead. 
+func (*BoolRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{13} +} + +func (x *BoolRules) GetConst() bool { + if x != nil && x.Const != nil { + return *x.Const + } + return false +} + +// StringRules describe the constraints applied to `string` values +type StringRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *string `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + Len *uint64 `protobuf:"varint,19,opt,name=len" json:"len,omitempty"` + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // LenBytes specifies that this field must be the specified number of bytes + LenBytes *uint64 `protobuf:"varint,20,opt,name=len_bytes,json=lenBytes" json:"len_bytes,omitempty"` + // MinBytes specifies that this field must be the specified number of bytes + // at a minimum + MinBytes *uint64 `protobuf:"varint,4,opt,name=min_bytes,json=minBytes" json:"min_bytes,omitempty"` + // MaxBytes specifies that this field must be the specified number of bytes + // at a maximum + MaxBytes *uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"` + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string `protobuf:"bytes,6,opt,name=pattern" json:"pattern,omitempty"` + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. + Prefix *string `protobuf:"bytes,7,opt,name=prefix" json:"prefix,omitempty"` + // Suffix specifies that this field must have the specified substring at + // the end of the string. + Suffix *string `protobuf:"bytes,8,opt,name=suffix" json:"suffix,omitempty"` + // Contains specifies that this field must have the specified substring + // anywhere in the string. + Contains *string `protobuf:"bytes,9,opt,name=contains" json:"contains,omitempty"` + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. 
+ NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains" json:"not_contains,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []string `protobuf:"bytes,10,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // WellKnown rules provide advanced constraints against common string + // patterns + // + // Types that are assignable to WellKnown: + // + // *StringRules_Email + // *StringRules_Hostname + // *StringRules_Ip + // *StringRules_Ipv4 + // *StringRules_Ipv6 + // *StringRules_Uri + // *StringRules_UriRef + // *StringRules_Address + // *StringRules_Uuid + // *StringRules_WellKnownRegex + WellKnown isStringRules_WellKnown `protobuf_oneof:"well_known"` + // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable + // strict header validation. + // By default, this is true, and HTTP header validations are RFC-compliant. + // Setting to false will enable a looser validations that only disallows + // \r\n\0 characters, which can be used to bypass header matching rules. + Strict *bool `protobuf:"varint,25,opt,name=strict,def=1" json:"strict,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,26,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +// Default values for StringRules fields. +const ( + Default_StringRules_Strict = bool(true) +) + +func (x *StringRules) Reset() { + *x = StringRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringRules) ProtoMessage() {} + +func (x *StringRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringRules.ProtoReflect.Descriptor instead. 
+func (*StringRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{14} +} + +func (x *StringRules) GetConst() string { + if x != nil && x.Const != nil { + return *x.Const + } + return "" +} + +func (x *StringRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *StringRules) GetMinLen() uint64 { + if x != nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *StringRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *StringRules) GetLenBytes() uint64 { + if x != nil && x.LenBytes != nil { + return *x.LenBytes + } + return 0 +} + +func (x *StringRules) GetMinBytes() uint64 { + if x != nil && x.MinBytes != nil { + return *x.MinBytes + } + return 0 +} + +func (x *StringRules) GetMaxBytes() uint64 { + if x != nil && x.MaxBytes != nil { + return *x.MaxBytes + } + return 0 +} + +func (x *StringRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *StringRules) GetPrefix() string { + if x != nil && x.Prefix != nil { + return *x.Prefix + } + return "" +} + +func (x *StringRules) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +func (x *StringRules) GetContains() string { + if x != nil && x.Contains != nil { + return *x.Contains + } + return "" +} + +func (x *StringRules) GetNotContains() string { + if x != nil && x.NotContains != nil { + return *x.NotContains + } + return "" +} + +func (x *StringRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *StringRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +func (m *StringRules) GetWellKnown() isStringRules_WellKnown { + if m != nil { + return m.WellKnown + } + return nil +} + +func (x *StringRules) GetEmail() bool { + if x, ok := x.GetWellKnown().(*StringRules_Email); ok { + return x.Email + } + return false +} + +func (x *StringRules) GetHostname() bool { + if x, ok := x.GetWellKnown().(*StringRules_Hostname); ok { + return x.Hostname + } + return false +} + +func (x *StringRules) GetIp() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ip); ok { + return x.Ip + } + return false +} + +func (x *StringRules) GetIpv4() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ipv4); ok { + return x.Ipv4 + } + return false +} + +func (x *StringRules) GetIpv6() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ipv6); ok { + return x.Ipv6 + } + return false +} + +func (x *StringRules) GetUri() bool { + if x, ok := x.GetWellKnown().(*StringRules_Uri); ok { + return x.Uri + } + return false +} + +func (x *StringRules) GetUriRef() bool { + if x, ok := x.GetWellKnown().(*StringRules_UriRef); ok { + return x.UriRef + } + return false +} + +func (x *StringRules) GetAddress() bool { + if x, ok := x.GetWellKnown().(*StringRules_Address); ok { + return x.Address + } + return false +} + +func (x *StringRules) GetUuid() bool { + if x, ok := x.GetWellKnown().(*StringRules_Uuid); ok { + return x.Uuid + } + return false +} + +func (x *StringRules) GetWellKnownRegex() KnownRegex { + if x, ok := x.GetWellKnown().(*StringRules_WellKnownRegex); ok { + return x.WellKnownRegex + } + return KnownRegex_UNKNOWN +} + +func (x *StringRules) GetStrict() bool { + if x != nil && x.Strict != nil { + return *x.Strict + } + return Default_StringRules_Strict +} + +func (x *StringRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil 
{ + return *x.IgnoreEmpty + } + return false +} + +type isStringRules_WellKnown interface { + isStringRules_WellKnown() +} + +type StringRules_Email struct { + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + Email bool `protobuf:"varint,12,opt,name=email,oneof"` +} + +type StringRules_Hostname struct { + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + Hostname bool `protobuf:"varint,13,opt,name=hostname,oneof"` +} + +type StringRules_Ip struct { + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. + Ip bool `protobuf:"varint,14,opt,name=ip,oneof"` +} + +type StringRules_Ipv4 struct { + // Ipv4 specifies that the field must be a valid IPv4 address. + Ipv4 bool `protobuf:"varint,15,opt,name=ipv4,oneof"` +} + +type StringRules_Ipv6 struct { + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + Ipv6 bool `protobuf:"varint,16,opt,name=ipv6,oneof"` +} + +type StringRules_Uri struct { + // Uri specifies that the field must be a valid, absolute URI as defined + // by RFC 3986 + Uri bool `protobuf:"varint,17,opt,name=uri,oneof"` +} + +type StringRules_UriRef struct { + // UriRef specifies that the field must be a valid URI as defined by RFC + // 3986 and may be relative or absolute. + UriRef bool `protobuf:"varint,18,opt,name=uri_ref,json=uriRef,oneof"` +} + +type StringRules_Address struct { + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + Address bool `protobuf:"varint,21,opt,name=address,oneof"` +} + +type StringRules_Uuid struct { + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + Uuid bool `protobuf:"varint,22,opt,name=uuid,oneof"` +} + +type StringRules_WellKnownRegex struct { + // WellKnownRegex specifies a common well known pattern defined as a regex. 
+ WellKnownRegex KnownRegex `protobuf:"varint,24,opt,name=well_known_regex,json=wellKnownRegex,enum=validate.KnownRegex,oneof"` +} + +func (*StringRules_Email) isStringRules_WellKnown() {} + +func (*StringRules_Hostname) isStringRules_WellKnown() {} + +func (*StringRules_Ip) isStringRules_WellKnown() {} + +func (*StringRules_Ipv4) isStringRules_WellKnown() {} + +func (*StringRules_Ipv6) isStringRules_WellKnown() {} + +func (*StringRules_Uri) isStringRules_WellKnown() {} + +func (*StringRules_UriRef) isStringRules_WellKnown() {} + +func (*StringRules_Address) isStringRules_WellKnown() {} + +func (*StringRules_Uuid) isStringRules_WellKnown() {} + +func (*StringRules_WellKnownRegex) isStringRules_WellKnown() {} + +// BytesRules describe the constraints applied to `bytes` values +type BytesRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const []byte `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // Len specifies that this field must be the specified number of bytes + Len *uint64 `protobuf:"varint,13,opt,name=len" json:"len,omitempty"` + // MinLen specifies that this field must be the specified number of bytes + // at a minimum + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // MaxLen specifies that this field must be the specified number of bytes + // at a maximum + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string `protobuf:"bytes,4,opt,name=pattern" json:"pattern,omitempty"` + // Prefix specifies that this field must have the specified bytes at the + // beginning of the string. + Prefix []byte `protobuf:"bytes,5,opt,name=prefix" json:"prefix,omitempty"` + // Suffix specifies that this field must have the specified bytes at the + // end of the string. + Suffix []byte `protobuf:"bytes,6,opt,name=suffix" json:"suffix,omitempty"` + // Contains specifies that this field must have the specified bytes + // anywhere in the string. 
+ Contains []byte `protobuf:"bytes,7,opt,name=contains" json:"contains,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In [][]byte `protobuf:"bytes,8,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn [][]byte `protobuf:"bytes,9,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // WellKnown rules provide advanced constraints against common byte + // patterns + // + // Types that are assignable to WellKnown: + // + // *BytesRules_Ip + // *BytesRules_Ipv4 + // *BytesRules_Ipv6 + WellKnown isBytesRules_WellKnown `protobuf_oneof:"well_known"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,14,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *BytesRules) Reset() { + *x = BytesRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BytesRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesRules) ProtoMessage() {} + +func (x *BytesRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesRules.ProtoReflect.Descriptor instead. +func (*BytesRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{15} +} + +func (x *BytesRules) GetConst() []byte { + if x != nil { + return x.Const + } + return nil +} + +func (x *BytesRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *BytesRules) GetMinLen() uint64 { + if x != nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *BytesRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *BytesRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *BytesRules) GetPrefix() []byte { + if x != nil { + return x.Prefix + } + return nil +} + +func (x *BytesRules) GetSuffix() []byte { + if x != nil { + return x.Suffix + } + return nil +} + +func (x *BytesRules) GetContains() []byte { + if x != nil { + return x.Contains + } + return nil +} + +func (x *BytesRules) GetIn() [][]byte { + if x != nil { + return x.In + } + return nil +} + +func (x *BytesRules) GetNotIn() [][]byte { + if x != nil { + return x.NotIn + } + return nil +} + +func (m *BytesRules) GetWellKnown() isBytesRules_WellKnown { + if m != nil { + return m.WellKnown + } + return nil +} + +func (x *BytesRules) GetIp() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ip); ok { + return x.Ip + } + return false +} + +func (x *BytesRules) GetIpv4() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ipv4); ok { + return x.Ipv4 + } + return false +} + +func (x *BytesRules) GetIpv6() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ipv6); ok { + return x.Ipv6 + } + return false +} + +func (x *BytesRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +type isBytesRules_WellKnown interface { + isBytesRules_WellKnown() +} 
+ +type BytesRules_Ip struct { + // Ip specifies that the field must be a valid IP (v4 or v6) address in + // byte format + Ip bool `protobuf:"varint,10,opt,name=ip,oneof"` +} + +type BytesRules_Ipv4 struct { + // Ipv4 specifies that the field must be a valid IPv4 address in byte + // format + Ipv4 bool `protobuf:"varint,11,opt,name=ipv4,oneof"` +} + +type BytesRules_Ipv6 struct { + // Ipv6 specifies that the field must be a valid IPv6 address in byte + // format + Ipv6 bool `protobuf:"varint,12,opt,name=ipv6,oneof"` +} + +func (*BytesRules_Ip) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv4) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv6) isBytesRules_WellKnown() {} + +// EnumRules describe the constraints applied to enum values +type EnumRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // DefinedOnly specifies that this field must be only one of the defined + // values for this enum, failing on any undefined value. + DefinedOnly *bool `protobuf:"varint,2,opt,name=defined_only,json=definedOnly" json:"defined_only,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"varint,3,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"varint,4,rep,name=not_in,json=notIn" json:"not_in,omitempty"` +} + +func (x *EnumRules) Reset() { + *x = EnumRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumRules) ProtoMessage() {} + +func (x *EnumRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumRules.ProtoReflect.Descriptor instead. +func (*EnumRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{16} +} + +func (x *EnumRules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *EnumRules) GetDefinedOnly() bool { + if x != nil && x.DefinedOnly != nil { + return *x.DefinedOnly + } + return false +} + +func (x *EnumRules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *EnumRules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +// MessageRules describe the constraints applied to embedded message values. +// For message-type fields, validation is performed recursively. 
+type MessageRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Skip specifies that the validation rules of this field should not be + // evaluated + Skip *bool `protobuf:"varint,1,opt,name=skip" json:"skip,omitempty"` + // Required specifies that this field must be set + Required *bool `protobuf:"varint,2,opt,name=required" json:"required,omitempty"` +} + +func (x *MessageRules) Reset() { + *x = MessageRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageRules) ProtoMessage() {} + +func (x *MessageRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageRules.ProtoReflect.Descriptor instead. +func (*MessageRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{17} +} + +func (x *MessageRules) GetSkip() bool { + if x != nil && x.Skip != nil { + return *x.Skip + } + return false +} + +func (x *MessageRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +// RepeatedRules describe the constraints applied to `repeated` values +type RepeatedRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + Unique *bool `protobuf:"varint,3,opt,name=unique" json:"unique,omitempty"` + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. 
+ Items *FieldRules `protobuf:"bytes,4,opt,name=items" json:"items,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,5,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *RepeatedRules) Reset() { + *x = RepeatedRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RepeatedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedRules) ProtoMessage() {} + +func (x *RepeatedRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RepeatedRules.ProtoReflect.Descriptor instead. +func (*RepeatedRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{18} +} + +func (x *RepeatedRules) GetMinItems() uint64 { + if x != nil && x.MinItems != nil { + return *x.MinItems + } + return 0 +} + +func (x *RepeatedRules) GetMaxItems() uint64 { + if x != nil && x.MaxItems != nil { + return *x.MaxItems + } + return 0 +} + +func (x *RepeatedRules) GetUnique() bool { + if x != nil && x.Unique != nil { + return *x.Unique + } + return false +} + +func (x *RepeatedRules) GetItems() *FieldRules { + if x != nil { + return x.Items + } + return nil +} + +func (x *RepeatedRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// MapRules describe the constraints applied to `map` values +type MapRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MinPairs specifies that this field must have the specified number of + // KVs at a minimum + MinPairs *uint64 `protobuf:"varint,1,opt,name=min_pairs,json=minPairs" json:"min_pairs,omitempty"` + // MaxPairs specifies that this field must have the specified number of + // KVs at a maximum + MaxPairs *uint64 `protobuf:"varint,2,opt,name=max_pairs,json=maxPairs" json:"max_pairs,omitempty"` + // NoSparse specifies values in this field cannot be unset. This only + // applies to map's with message value types. + NoSparse *bool `protobuf:"varint,3,opt,name=no_sparse,json=noSparse" json:"no_sparse,omitempty"` + // Keys specifies the constraints to be applied to each key in the field. + Keys *FieldRules `protobuf:"bytes,4,opt,name=keys" json:"keys,omitempty"` + // Values specifies the constraints to be applied to the value of each key + // in the field. Message values will still have their validations evaluated + // unless skip is specified here. 
+ Values *FieldRules `protobuf:"bytes,5,opt,name=values" json:"values,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,6,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *MapRules) Reset() { + *x = MapRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapRules) ProtoMessage() {} + +func (x *MapRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapRules.ProtoReflect.Descriptor instead. +func (*MapRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{19} +} + +func (x *MapRules) GetMinPairs() uint64 { + if x != nil && x.MinPairs != nil { + return *x.MinPairs + } + return 0 +} + +func (x *MapRules) GetMaxPairs() uint64 { + if x != nil && x.MaxPairs != nil { + return *x.MaxPairs + } + return 0 +} + +func (x *MapRules) GetNoSparse() bool { + if x != nil && x.NoSparse != nil { + return *x.NoSparse + } + return false +} + +func (x *MapRules) GetKeys() *FieldRules { + if x != nil { + return x.Keys + } + return nil +} + +func (x *MapRules) GetValues() *FieldRules { + if x != nil { + return x.Values + } + return nil +} + +func (x *MapRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// AnyRules describe constraints applied exclusively to the +// `google.protobuf.Any` well-known type +type AnyRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required specifies that this field must be set + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // In specifies that this field's `type_url` must be equal to one of the + // specified values. + In []string `protobuf:"bytes,2,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field's `type_url` must not be equal to any of + // the specified values. + NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn" json:"not_in,omitempty"` +} + +func (x *AnyRules) Reset() { + *x = AnyRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyRules) ProtoMessage() {} + +func (x *AnyRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyRules.ProtoReflect.Descriptor instead. 
+func (*AnyRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{20} +} + +func (x *AnyRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *AnyRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *AnyRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +// DurationRules describe the constraints applied exclusively to the +// `google.protobuf.Duration` well-known type +type DurationRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required specifies that this field must be set + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Const specifies that this field must be exactly the specified value + Const *durationpb.Duration `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *durationpb.Duration `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"` + // Lt specifies that this field must be less than the specified value, + // inclusive + Lte *durationpb.Duration `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive + Gt *durationpb.Duration `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than the specified value, + // inclusive + Gte *durationpb.Duration `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []*durationpb.Duration `protobuf:"bytes,7,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []*durationpb.Duration `protobuf:"bytes,8,rep,name=not_in,json=notIn" json:"not_in,omitempty"` +} + +func (x *DurationRules) Reset() { + *x = DurationRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurationRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurationRules) ProtoMessage() {} + +func (x *DurationRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurationRules.ProtoReflect.Descriptor instead. 
+func (*DurationRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{21} +} + +func (x *DurationRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *DurationRules) GetConst() *durationpb.Duration { + if x != nil { + return x.Const + } + return nil +} + +func (x *DurationRules) GetLt() *durationpb.Duration { + if x != nil { + return x.Lt + } + return nil +} + +func (x *DurationRules) GetLte() *durationpb.Duration { + if x != nil { + return x.Lte + } + return nil +} + +func (x *DurationRules) GetGt() *durationpb.Duration { + if x != nil { + return x.Gt + } + return nil +} + +func (x *DurationRules) GetGte() *durationpb.Duration { + if x != nil { + return x.Gte + } + return nil +} + +func (x *DurationRules) GetIn() []*durationpb.Duration { + if x != nil { + return x.In + } + return nil +} + +func (x *DurationRules) GetNotIn() []*durationpb.Duration { + if x != nil { + return x.NotIn + } + return nil +} + +// TimestampRules describe the constraints applied exclusively to the +// `google.protobuf.Timestamp` well-known type +type TimestampRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required specifies that this field must be set + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Const specifies that this field must be exactly the specified value + Const *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than the specified value, + // inclusive + Lte *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive + Gt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than the specified value, + // inclusive + Gte *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"` + // LtNow specifies that this must be less than the current time. LtNow + // can only be used with the Within rule. + LtNow *bool `protobuf:"varint,7,opt,name=lt_now,json=ltNow" json:"lt_now,omitempty"` + // GtNow specifies that this must be greater than the current time. GtNow + // can only be used with the Within rule. + GtNow *bool `protobuf:"varint,8,opt,name=gt_now,json=gtNow" json:"gt_now,omitempty"` + // Within specifies that this field must be within this duration of the + // current time. This constraint can be used alone or with the LtNow and + // GtNow rules. 
+ Within *durationpb.Duration `protobuf:"bytes,9,opt,name=within" json:"within,omitempty"` +} + +func (x *TimestampRules) Reset() { + *x = TimestampRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimestampRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampRules) ProtoMessage() {} + +func (x *TimestampRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimestampRules.ProtoReflect.Descriptor instead. +func (*TimestampRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{22} +} + +func (x *TimestampRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *TimestampRules) GetConst() *timestamppb.Timestamp { + if x != nil { + return x.Const + } + return nil +} + +func (x *TimestampRules) GetLt() *timestamppb.Timestamp { + if x != nil { + return x.Lt + } + return nil +} + +func (x *TimestampRules) GetLte() *timestamppb.Timestamp { + if x != nil { + return x.Lte + } + return nil +} + +func (x *TimestampRules) GetGt() *timestamppb.Timestamp { + if x != nil { + return x.Gt + } + return nil +} + +func (x *TimestampRules) GetGte() *timestamppb.Timestamp { + if x != nil { + return x.Gte + } + return nil +} + +func (x *TimestampRules) GetLtNow() bool { + if x != nil && x.LtNow != nil { + return *x.LtNow + } + return false +} + +func (x *TimestampRules) GetGtNow() bool { + if x != nil && x.GtNow != nil { + return *x.GtNow + } + return false +} + +func (x *TimestampRules) GetWithin() *durationpb.Duration { + if x != nil { + return x.Within + } + return nil +} + +var file_validate_validate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1071, + Name: "validate.disabled", + Tag: "varint,1071,opt,name=disabled", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1072, + Name: "validate.ignored", + Tag: "varint,1072,opt,name=ignored", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.OneofOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1071, + Name: "validate.required", + Tag: "varint,1071,opt,name=required", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldRules)(nil), + Field: 1071, + Name: "validate.rules", + Tag: "bytes,1071,opt,name=rules", + Filename: "validate/validate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // Disabled nullifies any validation rules for this message, including any + // message fields associated with it that do support validation. + // + // optional bool disabled = 1071; + E_Disabled = &file_validate_validate_proto_extTypes[0] + // Ignore skips generation of validation methods for this message. + // + // optional bool ignored = 1072; + E_Ignored = &file_validate_validate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.OneofOptions. 
+var ( + // Required ensures that exactly one the field options in a oneof is set; + // validation fails if no fields in the oneof are set. + // + // optional bool required = 1071; + E_Required = &file_validate_validate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + // + // optional validate.FieldRules rules = 1071; + E_Rules = &file_validate_validate_proto_extTypes[3] +) + +var File_validate_validate_proto protoreflect.FileDescriptor + +var file_validate_validate_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x08, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x66, 0x6c, 0x6f, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, + 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, + 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69, 0x6e, + 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 
0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x53, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, + 0x69, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x53, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, + 0x32, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, + 0x78, 0x65, 0x64, 0x36, 0x34, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x35, + 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78, + 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69, + 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x35, 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, + 0x34, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x29, 0x0a, 0x04, + 0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x04, 0x62, 0x6f, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, + 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, + 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x02, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x02, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x01, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, + 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, 0x52, 0x05, + 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 
0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x67, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x67, 0x74, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69, + 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, + 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, + 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x04, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, + 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, + 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x67, 0x74, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x11, 0x52, 0x02, 0x69, 0x6e, + 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x11, + 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x12, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x12, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x12, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb2, + 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x07, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x07, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x07, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x07, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x07, 0x52, 0x02, 0x69, 0x6e, 
0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, + 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x07, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x06, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, + 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x06, 0x52, 0x05, + 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69, + 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0f, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0f, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0f, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3, + 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x10, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x10, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x10, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x10, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x10, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x10, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x10, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 
0x6d, 0x70, 0x74, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x21, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x22, 0xd4, 0x05, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12, + 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, + 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x14, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x02, 0x69, 0x70, + 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x03, + 0x75, 0x72, 0x69, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, + 0x12, 0x19, 0x0a, 0x07, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x08, 
0x48, 0x00, 0x52, 0x06, 0x75, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x1a, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x40, 0x0a, + 0x10, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x48, 0x00, 0x52, + 0x0e, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, + 0x1c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x22, 0xe2, + 0x02, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17, + 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, + 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x6e, + 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x10, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x00, 0x52, 0x02, 0x69, 0x70, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, + 0x69, 0x70, 0x76, 0x36, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, + 0x76, 0x36, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x22, 0x6b, 0x0a, 0x09, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 
0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, + 0x5f, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, + 0x22, 0x3e, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x73, 0x6b, 0x69, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x22, 0xb0, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, + 0x5f, 0x73, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, + 0x6f, 0x53, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x4d, 0x0a, 0x08, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x22, 0xe9, 0x02, 0x0a, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x29, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x6c, + 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x02, 0x67, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x74, 0x65, + 0x12, 0x29, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x30, 0x0a, 0x06, 0x6e, + 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x22, 0xf3, 0x02, + 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x05, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x2a, + 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x02, 0x67, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x03, 0x67, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x74, 0x5f, 0x6e, 0x6f, 0x77, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x6c, 0x74, 0x4e, 0x6f, 0x77, 0x12, 0x15, 0x0a, 0x06, 0x67, 0x74, 0x5f, + 0x6e, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x67, 0x74, 0x4e, 0x6f, 0x77, + 0x12, 0x31, 0x0a, 0x06, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x77, 0x69, 0x74, + 0x68, 0x69, 0x6e, 0x2a, 0x46, 0x0a, 0x0a, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, + 0x78, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, + 0x0a, 0x10, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x5f, 0x4e, 0x41, + 0x4d, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41, + 0x44, 0x45, 0x52, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x02, 0x3a, 0x3c, 0x0a, 0x08, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x07, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x3a, 0x4a, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x50, 0x0a, + 0x1a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70, + 0x67, 0x76, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5a, 0x32, 0x67, 0x69, 0x74, + 0x68, 
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, +} + +var ( + file_validate_validate_proto_rawDescOnce sync.Once + file_validate_validate_proto_rawDescData = file_validate_validate_proto_rawDesc +) + +func file_validate_validate_proto_rawDescGZIP() []byte { + file_validate_validate_proto_rawDescOnce.Do(func() { + file_validate_validate_proto_rawDescData = protoimpl.X.CompressGZIP(file_validate_validate_proto_rawDescData) + }) + return file_validate_validate_proto_rawDescData +} + +var file_validate_validate_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_validate_validate_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_validate_validate_proto_goTypes = []interface{}{ + (KnownRegex)(0), // 0: validate.KnownRegex + (*FieldRules)(nil), // 1: validate.FieldRules + (*FloatRules)(nil), // 2: validate.FloatRules + (*DoubleRules)(nil), // 3: validate.DoubleRules + (*Int32Rules)(nil), // 4: validate.Int32Rules + (*Int64Rules)(nil), // 5: validate.Int64Rules + (*UInt32Rules)(nil), // 6: validate.UInt32Rules + (*UInt64Rules)(nil), // 7: validate.UInt64Rules + (*SInt32Rules)(nil), // 8: validate.SInt32Rules + (*SInt64Rules)(nil), // 9: validate.SInt64Rules + (*Fixed32Rules)(nil), // 10: validate.Fixed32Rules + (*Fixed64Rules)(nil), // 11: validate.Fixed64Rules + (*SFixed32Rules)(nil), // 12: validate.SFixed32Rules + (*SFixed64Rules)(nil), // 13: validate.SFixed64Rules + (*BoolRules)(nil), // 14: validate.BoolRules + (*StringRules)(nil), // 15: validate.StringRules + (*BytesRules)(nil), // 16: validate.BytesRules + (*EnumRules)(nil), // 17: validate.EnumRules + (*MessageRules)(nil), // 18: validate.MessageRules + (*RepeatedRules)(nil), // 19: validate.RepeatedRules + (*MapRules)(nil), // 20: validate.MapRules + (*AnyRules)(nil), // 21: validate.AnyRules + (*DurationRules)(nil), // 22: validate.DurationRules + (*TimestampRules)(nil), // 23: validate.TimestampRules + (*durationpb.Duration)(nil), // 24: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (*descriptorpb.MessageOptions)(nil), // 26: google.protobuf.MessageOptions + (*descriptorpb.OneofOptions)(nil), // 27: google.protobuf.OneofOptions + (*descriptorpb.FieldOptions)(nil), // 28: google.protobuf.FieldOptions +} +var file_validate_validate_proto_depIdxs = []int32{ + 18, // 0: validate.FieldRules.message:type_name -> validate.MessageRules + 2, // 1: validate.FieldRules.float:type_name -> validate.FloatRules + 3, // 2: validate.FieldRules.double:type_name -> validate.DoubleRules + 4, // 3: validate.FieldRules.int32:type_name -> validate.Int32Rules + 5, // 4: validate.FieldRules.int64:type_name -> validate.Int64Rules + 6, // 5: validate.FieldRules.uint32:type_name -> validate.UInt32Rules + 7, // 6: validate.FieldRules.uint64:type_name -> validate.UInt64Rules + 8, // 7: validate.FieldRules.sint32:type_name -> validate.SInt32Rules + 9, // 8: validate.FieldRules.sint64:type_name -> validate.SInt64Rules + 10, // 9: validate.FieldRules.fixed32:type_name -> validate.Fixed32Rules + 11, // 10: validate.FieldRules.fixed64:type_name -> validate.Fixed64Rules + 12, // 11: validate.FieldRules.sfixed32:type_name -> validate.SFixed32Rules + 13, // 12: validate.FieldRules.sfixed64:type_name -> validate.SFixed64Rules + 14, // 13: validate.FieldRules.bool:type_name -> 
validate.BoolRules + 15, // 14: validate.FieldRules.string:type_name -> validate.StringRules + 16, // 15: validate.FieldRules.bytes:type_name -> validate.BytesRules + 17, // 16: validate.FieldRules.enum:type_name -> validate.EnumRules + 19, // 17: validate.FieldRules.repeated:type_name -> validate.RepeatedRules + 20, // 18: validate.FieldRules.map:type_name -> validate.MapRules + 21, // 19: validate.FieldRules.any:type_name -> validate.AnyRules + 22, // 20: validate.FieldRules.duration:type_name -> validate.DurationRules + 23, // 21: validate.FieldRules.timestamp:type_name -> validate.TimestampRules + 0, // 22: validate.StringRules.well_known_regex:type_name -> validate.KnownRegex + 1, // 23: validate.RepeatedRules.items:type_name -> validate.FieldRules + 1, // 24: validate.MapRules.keys:type_name -> validate.FieldRules + 1, // 25: validate.MapRules.values:type_name -> validate.FieldRules + 24, // 26: validate.DurationRules.const:type_name -> google.protobuf.Duration + 24, // 27: validate.DurationRules.lt:type_name -> google.protobuf.Duration + 24, // 28: validate.DurationRules.lte:type_name -> google.protobuf.Duration + 24, // 29: validate.DurationRules.gt:type_name -> google.protobuf.Duration + 24, // 30: validate.DurationRules.gte:type_name -> google.protobuf.Duration + 24, // 31: validate.DurationRules.in:type_name -> google.protobuf.Duration + 24, // 32: validate.DurationRules.not_in:type_name -> google.protobuf.Duration + 25, // 33: validate.TimestampRules.const:type_name -> google.protobuf.Timestamp + 25, // 34: validate.TimestampRules.lt:type_name -> google.protobuf.Timestamp + 25, // 35: validate.TimestampRules.lte:type_name -> google.protobuf.Timestamp + 25, // 36: validate.TimestampRules.gt:type_name -> google.protobuf.Timestamp + 25, // 37: validate.TimestampRules.gte:type_name -> google.protobuf.Timestamp + 24, // 38: validate.TimestampRules.within:type_name -> google.protobuf.Duration + 26, // 39: validate.disabled:extendee -> google.protobuf.MessageOptions + 26, // 40: validate.ignored:extendee -> google.protobuf.MessageOptions + 27, // 41: validate.required:extendee -> google.protobuf.OneofOptions + 28, // 42: validate.rules:extendee -> google.protobuf.FieldOptions + 1, // 43: validate.rules:type_name -> validate.FieldRules + 44, // [44:44] is the sub-list for method output_type + 44, // [44:44] is the sub-list for method input_type + 43, // [43:44] is the sub-list for extension type_name + 39, // [39:43] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_validate_validate_proto_init() } +func file_validate_validate_proto_init() { + if File_validate_validate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_validate_validate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FloatRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_validate_validate_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SInt32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SInt64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Fixed32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Fixed64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SFixed32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SFixed64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BoolRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BytesRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumRules); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RepeatedRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurationRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimestampRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_validate_validate_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FieldRules_Float)(nil), + (*FieldRules_Double)(nil), + (*FieldRules_Int32)(nil), + (*FieldRules_Int64)(nil), + (*FieldRules_Uint32)(nil), + (*FieldRules_Uint64)(nil), + (*FieldRules_Sint32)(nil), + (*FieldRules_Sint64)(nil), + (*FieldRules_Fixed32)(nil), + (*FieldRules_Fixed64)(nil), + (*FieldRules_Sfixed32)(nil), + (*FieldRules_Sfixed64)(nil), + (*FieldRules_Bool)(nil), + (*FieldRules_String_)(nil), + (*FieldRules_Bytes)(nil), + (*FieldRules_Enum)(nil), + (*FieldRules_Repeated)(nil), + (*FieldRules_Map)(nil), + (*FieldRules_Any)(nil), + (*FieldRules_Duration)(nil), + (*FieldRules_Timestamp)(nil), + } + file_validate_validate_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*StringRules_Email)(nil), + (*StringRules_Hostname)(nil), + (*StringRules_Ip)(nil), + (*StringRules_Ipv4)(nil), + (*StringRules_Ipv6)(nil), + (*StringRules_Uri)(nil), + (*StringRules_UriRef)(nil), + (*StringRules_Address)(nil), + (*StringRules_Uuid)(nil), + (*StringRules_WellKnownRegex)(nil), + } + file_validate_validate_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*BytesRules_Ip)(nil), + (*BytesRules_Ipv4)(nil), + (*BytesRules_Ipv6)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_validate_validate_proto_rawDesc, + NumEnums: 1, + NumMessages: 23, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_validate_validate_proto_goTypes, + DependencyIndexes: file_validate_validate_proto_depIdxs, + EnumInfos: file_validate_validate_proto_enumTypes, + MessageInfos: file_validate_validate_proto_msgTypes, + ExtensionInfos: file_validate_validate_proto_extTypes, + }.Build() + File_validate_validate_proto = out.File + file_validate_validate_proto_rawDesc = nil + file_validate_validate_proto_goTypes = nil + 
file_validate_validate_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto new file mode 100644 index 000000000..5aa965393 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto @@ -0,0 +1,862 @@ +syntax = "proto2"; +package validate; + +option go_package = "github.com/envoyproxy/protoc-gen-validate/validate"; +option java_package = "io.envoyproxy.pgv.validate"; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// Validation rules applied at the message level +extend google.protobuf.MessageOptions { + // Disabled nullifies any validation rules for this message, including any + // message fields associated with it that do support validation. + optional bool disabled = 1071; + // Ignore skips generation of validation methods for this message. + optional bool ignored = 1072; +} + +// Validation rules applied at the oneof level +extend google.protobuf.OneofOptions { + // Required ensures that exactly one the field options in a oneof is set; + // validation fails if no fields in the oneof are set. + optional bool required = 1071; +} + +// Validation rules applied at the field level +extend google.protobuf.FieldOptions { + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + optional FieldRules rules = 1071; +} + +// FieldRules encapsulates the rules for each type of field. Depending on the +// field, the correct set should be used to ensure proper validations. +message FieldRules { + optional MessageRules message = 17; + oneof type { + // Scalar Field Types + FloatRules float = 1; + DoubleRules double = 2; + Int32Rules int32 = 3; + Int64Rules int64 = 4; + UInt32Rules uint32 = 5; + UInt64Rules uint64 = 6; + SInt32Rules sint32 = 7; + SInt64Rules sint64 = 8; + Fixed32Rules fixed32 = 9; + Fixed64Rules fixed64 = 10; + SFixed32Rules sfixed32 = 11; + SFixed64Rules sfixed64 = 12; + BoolRules bool = 13; + StringRules string = 14; + BytesRules bytes = 15; + + // Complex Field Types + EnumRules enum = 16; + RepeatedRules repeated = 18; + MapRules map = 19; + + // Well-Known Field Types + AnyRules any = 20; + DurationRules duration = 21; + TimestampRules timestamp = 22; + } +} + +// FloatRules describes the constraints applied to `float` values +message FloatRules { + // Const specifies that this field must be exactly the specified value + optional float const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional float lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional float lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional float gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional float gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated float in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated float not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// DoubleRules describes the constraints applied to `double` values +message DoubleRules { + // Const specifies that this field must be exactly the specified value + optional double const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional double lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional double lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional double gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional double gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated double in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated double not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int32Rules describes the constraints applied to `int32` values +message Int32Rules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional int32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional int32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int64Rules describes the constraints applied to `int64` values +message Int64Rules { + // Const specifies that this field must be exactly the specified value + optional int64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. 
+ optional int64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional int64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt32Rules describes the constraints applied to `uint32` values +message UInt32Rules { + // Const specifies that this field must be exactly the specified value + optional uint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional uint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt64Rules describes the constraints applied to `uint64` values +message UInt64Rules { + // Const specifies that this field must be exactly the specified value + optional uint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional uint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt32Rules describes the constraints applied to `sint32` values +message SInt32Rules { + // Const specifies that this field must be exactly the specified value + optional sint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt64Rules describes the constraints applied to `sint64` values +message SInt64Rules { + // Const specifies that this field must be exactly the specified value + optional sint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed32Rules describes the constraints applied to `fixed32` values +message Fixed32Rules { + // Const specifies that this field must be exactly the specified value + optional fixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. 
If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional fixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed64Rules describes the constraints applied to `fixed64` values +message Fixed64Rules { + // Const specifies that this field must be exactly the specified value + optional fixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional fixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed32Rules describes the constraints applied to `sfixed32` values +message SFixed32Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sfixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed64Rules describes the constraints applied to `sfixed64` values +message SFixed64Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sfixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// BoolRules describes the constraints applied to `bool` values +message BoolRules { + // Const specifies that this field must be exactly the specified value + optional bool const = 1; +} + +// StringRules describe the constraints applied to `string` values +message StringRules { + // Const specifies that this field must be exactly the specified value + optional string const = 1; + + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 len = 19; + + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 max_len = 3; + + // LenBytes specifies that this field must be the specified number of bytes + optional uint64 len_bytes = 20; + + // MinBytes specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_bytes = 4; + + // MaxBytes specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_bytes = 5; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 6; + + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. 
+ optional string prefix = 7; + + // Suffix specifies that this field must have the specified substring at + // the end of the string. + optional string suffix = 8; + + // Contains specifies that this field must have the specified substring + // anywhere in the string. + optional string contains = 9; + + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. + optional string not_contains = 23; + + // In specifies that this field must be equal to one of the specified + // values + repeated string in = 10; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated string not_in = 11; + + // WellKnown rules provide advanced constraints against common string + // patterns + oneof well_known { + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + bool email = 12; + + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + bool hostname = 13; + + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. + bool ip = 14; + + // Ipv4 specifies that the field must be a valid IPv4 address. + bool ipv4 = 15; + + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + bool ipv6 = 16; + + // Uri specifies that the field must be a valid, absolute URI as defined + // by RFC 3986 + bool uri = 17; + + // UriRef specifies that the field must be a valid URI as defined by RFC + // 3986 and may be relative or absolute. + bool uri_ref = 18; + + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + bool address = 21; + + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + bool uuid = 22; + + // WellKnownRegex specifies a common well known pattern defined as a regex. + KnownRegex well_known_regex = 24; + } + + // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable + // strict header validation. + // By default, this is true, and HTTP header validations are RFC-compliant. + // Setting to false will enable a looser validations that only disallows + // \r\n\0 characters, which can be used to bypass header matching rules. + optional bool strict = 25 [default = true]; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 26; +} + +// WellKnownRegex contain some well-known patterns. +enum KnownRegex { + UNKNOWN = 0; + + // HTTP header name as defined by RFC 7230. + HTTP_HEADER_NAME = 1; + + // HTTP header value as defined by RFC 7230. 
+ HTTP_HEADER_VALUE = 2; +} + +// BytesRules describe the constraints applied to `bytes` values +message BytesRules { + // Const specifies that this field must be exactly the specified value + optional bytes const = 1; + + // Len specifies that this field must be the specified number of bytes + optional uint64 len = 13; + + // MinLen specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_len = 3; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 4; + + // Prefix specifies that this field must have the specified bytes at the + // beginning of the string. + optional bytes prefix = 5; + + // Suffix specifies that this field must have the specified bytes at the + // end of the string. + optional bytes suffix = 6; + + // Contains specifies that this field must have the specified bytes + // anywhere in the string. + optional bytes contains = 7; + + // In specifies that this field must be equal to one of the specified + // values + repeated bytes in = 8; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated bytes not_in = 9; + + // WellKnown rules provide advanced constraints against common byte + // patterns + oneof well_known { + // Ip specifies that the field must be a valid IP (v4 or v6) address in + // byte format + bool ip = 10; + + // Ipv4 specifies that the field must be a valid IPv4 address in byte + // format + bool ipv4 = 11; + + // Ipv6 specifies that the field must be a valid IPv6 address in byte + // format + bool ipv6 = 12; + } + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 14; +} + +// EnumRules describe the constraints applied to enum values +message EnumRules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // DefinedOnly specifies that this field must be only one of the defined + // values for this enum, failing on any undefined value. + optional bool defined_only = 2; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 3; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 4; +} + +// MessageRules describe the constraints applied to embedded message values. +// For message-type fields, validation is performed recursively. +message MessageRules { + // Skip specifies that the validation rules of this field should not be + // evaluated + optional bool skip = 1; + + // Required specifies that this field must be set + optional bool required = 2; +} + +// RepeatedRules describe the constraints applied to `repeated` values +message RepeatedRules { + // MinItems specifies that this field must have the specified number of + // items at a minimum + optional uint64 min_items = 1; + + // MaxItems specifies that this field must have the specified number of + // items at a maximum + optional uint64 max_items = 2; + + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). 
+ optional bool unique = 3; + + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. + optional FieldRules items = 4; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 5; +} + +// MapRules describe the constraints applied to `map` values +message MapRules { + // MinPairs specifies that this field must have the specified number of + // KVs at a minimum + optional uint64 min_pairs = 1; + + // MaxPairs specifies that this field must have the specified number of + // KVs at a maximum + optional uint64 max_pairs = 2; + + // NoSparse specifies values in this field cannot be unset. This only + // applies to map's with message value types. + optional bool no_sparse = 3; + + // Keys specifies the constraints to be applied to each key in the field. + optional FieldRules keys = 4; + + // Values specifies the constraints to be applied to the value of each key + // in the field. Message values will still have their validations evaluated + // unless skip is specified here. + optional FieldRules values = 5; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 6; +} + +// AnyRules describe constraints applied exclusively to the +// `google.protobuf.Any` well-known type +message AnyRules { + // Required specifies that this field must be set + optional bool required = 1; + + // In specifies that this field's `type_url` must be equal to one of the + // specified values. + repeated string in = 2; + + // NotIn specifies that this field's `type_url` must not be equal to any of + // the specified values. 
+ repeated string not_in = 3; +} + +// DurationRules describe the constraints applied exclusively to the +// `google.protobuf.Duration` well-known type +message DurationRules { + // Required specifies that this field must be set + optional bool required = 1; + + // Const specifies that this field must be exactly the specified value + optional google.protobuf.Duration const = 2; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional google.protobuf.Duration lt = 3; + + // Lt specifies that this field must be less than the specified value, + // inclusive + optional google.protobuf.Duration lte = 4; + + // Gt specifies that this field must be greater than the specified value, + // exclusive + optional google.protobuf.Duration gt = 5; + + // Gte specifies that this field must be greater than the specified value, + // inclusive + optional google.protobuf.Duration gte = 6; + + // In specifies that this field must be equal to one of the specified + // values + repeated google.protobuf.Duration in = 7; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated google.protobuf.Duration not_in = 8; +} + +// TimestampRules describe the constraints applied exclusively to the +// `google.protobuf.Timestamp` well-known type +message TimestampRules { + // Required specifies that this field must be set + optional bool required = 1; + + // Const specifies that this field must be exactly the specified value + optional google.protobuf.Timestamp const = 2; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional google.protobuf.Timestamp lt = 3; + + // Lte specifies that this field must be less than the specified value, + // inclusive + optional google.protobuf.Timestamp lte = 4; + + // Gt specifies that this field must be greater than the specified value, + // exclusive + optional google.protobuf.Timestamp gt = 5; + + // Gte specifies that this field must be greater than the specified value, + // inclusive + optional google.protobuf.Timestamp gte = 6; + + // LtNow specifies that this must be less than the current time. LtNow + // can only be used with the Within rule. + optional bool lt_now = 7; + + // GtNow specifies that this must be greater than the current time. GtNow + // can only be used with the Within rule. + optional bool gt_now = 8; + + // Within specifies that this field must be within this duration of the + // current time. This constraint can be used alone or with the LtNow and + // GtNow rules. + optional google.protobuf.Duration within = 9; +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... 
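Note on the validate.proto rules vendored above: they are consumed by protoc-gen-validate, which reads the `(validate.rules)` field option declared in that file (fields are typically annotated like `string email = 1 [(validate.rules).string.email = true];`) and emits a `Validate() error` method on each generated Go message. The sketch below shows how calling code might check messages; it is illustrative only — `validator`, `checkAll`, and `fakeMsg` are hypothetical stand-ins, not part of this repository or of protoc-gen-validate.

```go
package main

import "fmt"

// validator matches the method shape protoc-gen-validate adds to generated
// messages (assumption: the single-error Validate() form).
type validator interface {
	Validate() error
}

// checkAll runs Validate on each message and collects the failures.
func checkAll(msgs ...validator) []error {
	var errs []error
	for _, m := range msgs {
		if err := m.Validate(); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}

// fakeMsg is a hypothetical placeholder for a generated message type.
type fakeMsg struct{ ok bool }

func (m fakeMsg) Validate() error {
	if !m.ok {
		return fmt.Errorf("field rule violated")
	}
	return nil
}

func main() {
	for _, err := range checkAll(fakeMsg{ok: false}, fakeMsg{ok: true}) {
		fmt.Println("validation failed:", err)
	}
}
```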
+ - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. 
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
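The reworked sendEvent/sendError helpers below put the done-channel receive and the channel send in the same select, so a watcher that has already been closed drops the delivery instead of blocking forever. A standalone sketch of that pattern under assumed names (`emitter` and this `Event` are illustrative, not fsnotify types):

```go
package main

import "fmt"

type Event struct{ Name string }

// emitter mimics the shape of the backend structs in this diff: an events
// channel plus a done channel that is closed on shutdown.
type emitter struct {
	events chan Event
	done   chan struct{}
}

// send returns false once done is closed, rather than blocking on a channel
// that no longer has a reader.
func (e *emitter) send(ev Event) bool {
	select {
	case <-e.done:
		return false
	case e.events <- ev:
		return true
	}
}

func main() {
	e := &emitter{events: make(chan Event), done: make(chan struct{})}
	close(e.done) // simulate Close(): later sends are dropped, not stuck
	fmt.Println(e.send(Event{Name: "/tmp/file"})) // prints "false"
}
```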
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
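
The sendEvent/sendError helpers above always select on the done channel alongside the output channel, and sendError now treats a nil error as already sent, so callers later in this file can write `if !w.sendError(err) { return }` unconditionally. A minimal sketch of the same shutdown-safe send pattern, with generic names rather than the vendored ones:

    package main

    import "fmt"

    type notifier struct {
        events chan string
        done   chan struct{}
    }

    // send delivers ev unless the notifier has been closed, and reports whether
    // the event was sent. Selecting on done as well as the events channel means
    // a producer goroutine can never block forever on a receiver that went away
    // after Close.
    func (n *notifier) send(ev string) bool {
        select {
        case <-n.done:
            return false
        case n.events <- ev:
            return true
        }
    }

    func main() {
        n := &notifier{events: make(chan string), done: make(chan struct{})}
        close(n.done)             // simulate Close()
        fmt.Println(n.send("hi")) // false: nothing is reading, but we return promptly
    }
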
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
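
AddWith above treats a path ending in the Go-style "/..." suffix as a recursive watch: it walks the tree with filepath.WalkDir, registers every subdirectory, and optionally synthesizes Create events for directories that appeared before the watches were in place. A hypothetical caller-side sketch of that convention, assuming the public Watcher forwards Add to this backend (how and whether the exported API enables recursion isn't shown in this hunk):

    package main

    import (
        "log"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // Trailing "/..." asks the inotify backend to watch /tmp/project and
        // everything below it; new subdirectories are registered when their
        // Create events are seen.
        if err := w.Add("/tmp/project/..."); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                log.Println(ev)
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }
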
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. 
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
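
As the kqueue notes reproduced above point out, this backend opens one file descriptor per watched file, so large trees run into the process's open-file limit well before inotify would. A small sketch of checking that limit from Go, using the same golang.org/x/sys/unix package the backend imports (raising it is then a matter of unix.Setrlimit, `ulimit -n`, or the kern.maxfiles / kern.maxfilesperproc sysctls mentioned above):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        var rl unix.Rlimit
        if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
            fmt.Println("getrlimit:", err)
            return
        }
        // Each watched file costs one descriptor, plus one per watched
        // directory, one for the kqueue itself, and two for the close pipe.
        fmt.Printf("open-file limit: soft=%d hard=%d\n", rl.Cur, rl.Max)
    }
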
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
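
addWatch and register below boil down to the standard kqueue vnode-watch sequence: open the path, build a Kevent_t with EVFILT_VNODE and EV_ADD|EV_CLEAR|EV_ENABLE, set the NOTE_* fflags, and submit it with unix.Kevent. A stripped-down, BSD/macOS-only sketch of that sequence (error handling trimmed; the real backend uses a platform-specific open mode such as O_EVTONLY on macOS rather than plain O_RDONLY):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        kq, err := unix.Kqueue()
        if err != nil {
            panic(err)
        }
        defer unix.Close(kq)

        fd, err := unix.Open("/tmp", unix.O_RDONLY, 0)
        if err != nil {
            panic(err)
        }
        defer unix.Close(fd)

        var change unix.Kevent_t
        unix.SetKevent(&change, fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
        change.Fflags = unix.NOTE_WRITE | unix.NOTE_DELETE | unix.NOTE_RENAME

        // First call registers the change; second call blocks for one event.
        if _, err := unix.Kevent(kq, []unix.Kevent_t{change}, nil, nil); err != nil {
            panic(err)
        }
        events := make([]unix.Kevent_t, 1)
        n, err := unix.Kevent(kq, nil, events, nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("got %d event(s), fflags=%#x\n", n, events[0].Fflags)
    }
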
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
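
The documentation above notes that one user-level "write" can surface as many Write events and points at the dedup example in cmd/fsnotify. A loose sketch of that debouncing idea (not that example's code): collapse bursts of Write events per path and act only once a path has been quiet for 100ms.

    package main

    import (
        "log"
        "sync"
        "time"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()
        if err := w.Add("/tmp"); err != nil {
            log.Fatal(err)
        }

        var (
            mu     sync.Mutex
            timers = map[string]*time.Timer{}
        )
        for {
            select {
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                if !ev.Has(fsnotify.Write) {
                    continue
                }
                name := ev.Name
                mu.Lock()
                if t, exists := timers[name]; exists {
                    t.Reset(100 * time.Millisecond) // still being written; wait longer
                } else {
                    timers[name] = time.AfterFunc(100*time.Millisecond, func() {
                        mu.Lock()
                        delete(timers, name)
                        mu.Unlock()
                        log.Println("write settled:", name)
                    })
                }
                mu.Unlock()
            }
        }
    }
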
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
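The watch and instance limits described above show up at runtime as errors from Add and on the Errors channel; a short sketch of handling them (the path is a placeholder, and the ENOSPC mapping is the usual Linux behaviour rather than something the API guarantees):

package main

import (
	"errors"
	"log"
	"syscall"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Hitting fs.inotify.max_user_watches usually surfaces as ENOSPC
	// ("no space left on device") from Add on Linux.
	if err := w.Add("/var/log"); err != nil { // placeholder path
		if errors.Is(err, syscall.ENOSPC) {
			log.Fatal("inotify watch limit reached; raise fs.inotify.max_user_watches")
		}
		log.Fatal(err)
	}

	// Kernel queue overflows are reported asynchronously as ErrEventOverflow.
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			log.Println("event queue overflowed; some events were dropped")
			continue
		}
		log.Println("watch error:", err)
	}
}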
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 000000000..928319fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 000000000..a72c64954 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md index 28bdd2fc0..6f717dbd8 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md @@ -1,3 +1,27 @@ +# v4.0.4 + +## Fixed + + - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a + breaking change. See #136 / #137. 
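For context, the JWKS-aware verification path that go-jose v4 exposes (and that the tryJWKS changes further down tighten) is exercised roughly as follows; the token, key set, and accepted algorithms are hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	rawJWS := "eyJhbGciOiJSUzI1NiIsImtpZCI6ImtleS0xIn0..." // placeholder compact JWS
	rawJWKS := []byte(`{"keys":[]}`)                        // placeholder JWK Set

	var jwks jose.JSONWebKeySet
	if err := json.Unmarshal(rawJWKS, &jwks); err != nil {
		log.Fatal(err)
	}

	// v4 requires callers to state which signature algorithms they accept.
	obj, err := jose.ParseSigned(rawJWS, []jose.SignatureAlgorithm{jose.RS256})
	if err != nil {
		log.Fatal(err)
	}

	// Verify selects the key from the set by the token's "kid" header; with
	// this update a set without a matching kid is rejected with
	// ErrJWKSKidNotFound instead of being passed through unchanged.
	payload, err := obj.Verify(jwks)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}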
+ +# v4.0.3 + +## Changed + + - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) + - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) + - Dependency updates + +# v4.0.2 + +## Changed + + - Improved documentation of Verify() to note that JSONWebKeySet is a supported + argument type (#104) + - Defined exported error values for missing x5c header and unsupported elliptic + curves error cases (#117) + # v4.0.1 ## Fixed diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go index aba08424c..d81b03b44 100644 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return nil, err + } decrypter, err := newDecrypter(key) if err != nil { return nil, err @@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - key := tryJWKS(decryptionKey, obj.Header) + key, err := tryJWKS(decryptionKey, obj.Header) + if err != nil { + return -1, Header{}, nil, err + } decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index a565aaab2..8a5284210 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) { return key.K.bytes(), nil } -func tryJWKS(key interface{}, headers ...Header) interface{} { +var ( + // ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a + // key ID which matches one in the provided tokens headers. + ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set") +) + +func tryJWKS(key interface{}, headers ...Header) (interface{}, error) { var jwks JSONWebKeySet switch jwksType := key.(type) { @@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { case JSONWebKeySet: jwks = jwksType default: - return key + // If the specified key is not a JWKS, return as is. + return key, nil } + // Determine the KID to search for from the headers. var kid string for _, header := range headers { if header.KeyID != "" { @@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} { } } + // If no KID is specified in the headers, reject. if kid == "" { - return key + return nil, ErrJWKSKidNotFound } + // Find the JWK with the matching KID. If no JWK with the specified KID is + // found, reject. keys := jwks.Key(kid) if len(keys) == 0 { - return key + return nil, ErrJWKSKidNotFound } - return keys[0].Key + return keys[0].Key, nil } diff --git a/vendor/github.com/go-jose/go-jose/v4/opaque.go b/vendor/github.com/go-jose/go-jose/v4/opaque.go index 68db085ef..429427232 100644 --- a/vendor/github.com/go-jose/go-jose/v4/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v4/opaque.go @@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig } // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key. 
+// +// Note: this cannot currently be implemented outside this package because of its +// unexported method. type OpaqueKeyEncrypter interface { // KeyID returns the kid KeyID() string diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index 46c9a4d96..3dec0112b 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return err + } verifier, err := newVerifier(key) if err != nil { return err @@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // The verificationKey argument must have one of the types allowed for the // verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - key := tryJWKS(verificationKey, obj.headers()...) + key, err := tryJWKS(verificationKey, obj.headers()...) + if err != nil { + return -1, Signature{}, err + } verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err diff --git a/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go new file mode 100644 index 000000000..d4d6019ff --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go @@ -0,0 +1,63 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//go:build go1.23 + +// Package iterator contains helper for working with iterators. 
It is meant for +// internal use only by the Go Client Libraries. +package iterator + +import ( + "iter" + + otherit "google.golang.org/api/iterator" +) + +// RangeAdapter transforms client iterator type into a [iter.Seq2] that can +// be used with Go's range expressions. +// +// This is for internal use only. +func RangeAdapter[T any](next func() (T, error)) iter.Seq2[T, error] { + var err error + return func(yield func(T, error) bool) { + for { + if err != nil { + return + } + var resp T + resp, err = next() + if err == otherit.Done { + return + } + if !yield(resp, err) { + return + } + } + } +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 52c991b1e..0090321ca 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "encoding/base64" "encoding/hex" + "encoding/json" "fmt" "net" "net/http" @@ -41,6 +42,7 @@ const ( EnvVaultClientCert = "VAULT_CLIENT_CERT" EnvVaultClientKey = "VAULT_CLIENT_KEY" EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultHeaders = "VAULT_HEADERS" EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" EnvVaultNamespace = "VAULT_NAMESPACE" @@ -665,6 +667,30 @@ func NewClient(c *Config) (*Client, error) { client.setNamespace(namespace) } + if envHeaders := os.Getenv(EnvVaultHeaders); envHeaders != "" { + var result map[string]any + err := json.Unmarshal([]byte(envHeaders), &result) + if err != nil { + return nil, fmt.Errorf("could not unmarshal environment-supplied headers") + } + var forbiddenHeaders []string + for key, value := range result { + if strings.HasPrefix(key, "X-Vault-") { + forbiddenHeaders = append(forbiddenHeaders, key) + continue + } + + value, ok := value.(string) + if !ok { + return nil, fmt.Errorf("environment-supplied headers include non-string values") + } + client.AddHeader(key, value) + } + if len(forbiddenHeaders) > 0 { + return nil, fmt.Errorf("failed to setup Headers[%s]: Header starting by 'X-Vault-' are for internal usage only", strings.Join(forbiddenHeaders, ", ")) + } + } + return client, nil } @@ -705,7 +731,7 @@ func (c *Client) SetAddress(addr string) error { parsedAddr, err := c.config.ParseAddress(addr) if err != nil { - return errwrap.Wrapf("failed to set address: {{err}}", err) + return fmt.Errorf("failed to set address: %w", err) } c.addr = parsedAddr diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index 4bc1390b9..bdb8fb64b 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/cenkalti/backoff/v3" + "github.com/cenkalti/backoff/v4" ) var ( diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index a2d912c64..c0c8dea73 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -7,7 +7,6 @@ import ( "bytes" "encoding/json" "io" - "io/ioutil" "net/http" "net/url" @@ -77,13 +76,13 @@ func (r *Request) ToHTTP() (*http.Request, error) { // No body case r.BodyBytes != nil: - req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + req.Request.Body = io.NopCloser(bytes.NewReader(r.BodyBytes)) default: if c, ok := r.Body.(io.ReadCloser); ok { req.Request.Body = c } else { - req.Request.Body = 
ioutil.NopCloser(r.Body) + req.Request.Body = io.NopCloser(r.Body) } } diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go index 2842c1255..23246bf71 100644 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" ) @@ -44,7 +43,7 @@ func (r *Response) Error() error { } r.Body.Close() - r.Body = ioutil.NopCloser(bodyBuf) + r.Body = io.NopCloser(bodyBuf) ns := r.Header.Get(NamespaceHeaderName) // Build up the error object diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index d37bf3cf0..7df9f66a4 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -159,6 +159,10 @@ TOKEN_DONE: goto DONE } + if s.Data["identity_policies"] == nil { + goto DONE + } + sList, ok := s.Data["identity_policies"].([]string) if ok { identityPolicies = sList diff --git a/vendor/github.com/hashicorp/vault/api/sudo_paths.go b/vendor/github.com/hashicorp/vault/api/sudo_paths.go index 24beb4bb1..d458cbde0 100644 --- a/vendor/github.com/hashicorp/vault/api/sudo_paths.go +++ b/vendor/github.com/hashicorp/vault/api/sudo_paths.go @@ -28,6 +28,7 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + "/sys/internal/counters/activity/export": regexp.MustCompile(`^/sys/internal/counters/activity/export$`), "/sys/leases": regexp.MustCompile(`^/sys/leases$`), // This entry is a bit wrong... sys/leases/lookup does NOT require sudo. But sys/leases/lookup/ with a trailing // slash DOES require sudo. But the part of the Vault CLI that uses this logic doesn't pass operation-appropriate diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index 699f6e9fd..f0e896271 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -264,7 +264,7 @@ func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) continue } var b []byte - b, err = ioutil.ReadAll(t) + b, err = io.ReadAll(t) if err != nil || len(b) == 0 { return } diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go index 5ebeea35d..51654e954 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -4,9 +4,29 @@ Wrapper APIs for in-toto attestation ResourceDescriptor protos. package v1 -import "errors" +import ( + "encoding/hex" + "errors" + "fmt" +) -var ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") +var ( + ErrIncorrectDigestLength = errors.New("digest has incorrect length") + ErrInvalidDigestEncoding = errors.New("digest is not valid hex-encoded string") + ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") +) + +// Indicates if a given fixed-size hash algorithm is supported by default and returns the algorithm's +// digest size in bytes, if supported. 
We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. +// +// SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 +func isSupportedFixedSizeAlgorithm(alg string) (bool, int) { + algos := map[string]int{"md5": 16, "sha1": 20, "sha224": 28, "sha512_224": 28, "sha256": 32, "sha512_256": 32, "sha384": 48, "sha512": 64, "sha3_224": 28, "sha3_256": 32, "sha3_384": 48, "sha3_512": 64, "gitCommit": 20, "dirHash": 32} + + size, ok := algos[alg] + return ok, size +} func (d *ResourceDescriptor) Validate() error { // at least one of name, URI or digest are required @@ -14,5 +34,28 @@ func (d *ResourceDescriptor) Validate() error { return ErrRDRequiredField } + if len(d.GetDigest()) > 0 { + for alg, digest := range d.GetDigest() { + + // Per https://github.com/in-toto/attestation/blob/main/spec/v1/digest_set.md + // check encoding and length for supported algorithms; + // use of custom, unsupported algorithms is allowed and does not not generate validation errors. + supported, size := isSupportedFixedSizeAlgorithm(alg) + if supported { + // the in-toto spec expects a hex-encoded string in DigestSets for supported algorithms + hashBytes, err := hex.DecodeString(digest) + + if err != nil { + return fmt.Errorf("%w (%s: %s)", ErrInvalidDigestEncoding, alg, digest) + } + + // check the length of the digest + if len(hashBytes) != size { + return fmt.Errorf("%w: got %d bytes, want %d bytes (%s: %s)", ErrIncorrectDigestLength, len(hashBytes), size, alg, digest) + } + } + } + } + return nil } diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go index f662c8b89..3e59869b1 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/resource_descriptor.proto diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go index 8f48858d3..a2bd2c2d7 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/statement.proto diff --git a/vendor/github.com/jellydator/ttlcache/v3/README.md b/vendor/github.com/jellydator/ttlcache/v3/README.md index 3a557b030..a17cb2437 100644 --- a/vendor/github.com/jellydator/ttlcache/v3/README.md +++ b/vendor/github.com/jellydator/ttlcache/v3/README.md @@ -10,7 +10,8 @@ - Type parameters - Item expiration and automatic deletion - Automatic expiration time extension on each `Get` call -- `Loader` interface that may be used to load/lazily initialize missing cache +- `Loader` interface that may be used to load/lazily initialize missing cache +- Thread Safe items - Event handlers (insertion and eviction) - Metrics diff --git a/vendor/github.com/jellydator/ttlcache/v3/cache.go b/vendor/github.com/jellydator/ttlcache/v3/cache.go index 1ad3afbec..1b9e72ef0 100644 --- a/vendor/github.com/jellydator/ttlcache/v3/cache.go +++ b/vendor/github.com/jellydator/ttlcache/v3/cache.go @@ -133,7 +133,7 @@ func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] { ttl = c.options.ttl } - elem := c.get(key, false) + elem := c.get(key, false, true) if elem != nil { // update/overwrite an existing item item := elem.Value.(*Item[K, V]) @@ -176,14 +176,14 @@ func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] { // It returns nil if the item is not found or is expired. // Not safe for concurrent use by multiple goroutines without additional // locking. -func (c *Cache[K, V]) get(key K, touch bool) *list.Element { +func (c *Cache[K, V]) get(key K, touch bool, includeExpired bool) *list.Element { elem := c.items.values[key] if elem == nil { return nil } item := elem.Value.(*Item[K, V]) - if item.isExpiredUnsafe() { + if !includeExpired && item.isExpiredUnsafe() { return nil } @@ -218,7 +218,7 @@ func (c *Cache[K, V]) getWithOpts(key K, lockAndLoad bool, opts ...Option[K, V]) c.items.mu.Lock() } - elem := c.get(key, !getOpts.disableTouchOnHit) + elem := c.get(key, !getOpts.disableTouchOnHit, false) if lockAndLoad { c.items.mu.Unlock() @@ -339,8 +339,8 @@ func (c *Cache[K, V]) Has(key K) bool { c.items.mu.RLock() defer c.items.mu.RUnlock() - _, ok := c.items.values[key] - return ok + elem, ok := c.items.values[key] + return ok && !elem.Value.(*Item[K, V]).isExpiredUnsafe() } // GetOrSet retrieves an item from the cache by the provided key. @@ -436,26 +436,66 @@ func (c *Cache[K, V]) DeleteExpired() { // If the item is not found, the method is no-op. func (c *Cache[K, V]) Touch(key K) { c.items.mu.Lock() - c.get(key, true) + c.get(key, true, false) c.items.mu.Unlock() } -// Len returns the total number of items in the cache. +// Len returns the number of unexpired items in the cache. func (c *Cache[K, V]) Len() int { c.items.mu.RLock() defer c.items.mu.RUnlock() - return len(c.items.values) + total := c.items.expQueue.Len() + if total == 0 { + return 0 + } + + // search the heap-based expQueue by BFS + countExpired := func() int { + var ( + q []int + res int + ) + + item := c.items.expQueue[0].Value.(*Item[K, V]) + if !item.isExpiredUnsafe() { + return res + } + + q = append(q, 0) + for len(q) > 0 { + pop := q[0] + q = q[1:] + res++ + + for i := 1; i <= 2; i++ { + idx := 2*pop + i + if idx >= total { + break + } + + item = c.items.expQueue[idx].Value.(*Item[K, V]) + if item.isExpiredUnsafe() { + q = append(q, idx) + } + } + } + return res + } + + return total - countExpired() } -// Keys returns all keys currently present in the cache. 
+// Keys returns all unexpired keys in the cache. func (c *Cache[K, V]) Keys() []K { c.items.mu.RLock() defer c.items.mu.RUnlock() - res := make([]K, 0, len(c.items.values)) - for k := range c.items.values { - res = append(res, k) + res := make([]K, 0) + for k, elem := range c.items.values { + if !elem.Value.(*Item[K, V]).isExpiredUnsafe() { + res = append(res, k) + } } return res @@ -467,18 +507,18 @@ func (c *Cache[K, V]) Items() map[K]*Item[K, V] { c.items.mu.RLock() defer c.items.mu.RUnlock() - items := make(map[K]*Item[K, V], len(c.items.values)) - for k := range c.items.values { - item := c.get(k, false) - if item != nil { - items[k] = item.Value.(*Item[K, V]) + items := make(map[K]*Item[K, V]) + for k, elem := range c.items.values { + item := elem.Value.(*Item[K, V]) + if item != nil && !item.isExpiredUnsafe() { + items[k] = item } } return items } -// Range calls fn for each item present in the cache. If fn returns false, +// Range calls fn for each unexpired item in the cache. If fn returns false, // Range stops the iteration. func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { c.items.mu.RLock() @@ -491,9 +531,10 @@ func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { for item := c.items.lru.Front(); item != c.items.lru.Back().Next(); item = item.Next() { i := item.Value.(*Item[K, V]) + expired := i.isExpiredUnsafe() c.items.mu.RUnlock() - if !fn(i) { + if !expired && !fn(i) { return } @@ -503,6 +544,32 @@ func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { } } +// RangeBackwards calls fn for each unexpired item in the cache in reverse order. +// If fn returns false, RangeBackwards stops the iteration. +func (c *Cache[K, V]) RangeBackwards(fn func(item *Item[K, V]) bool) { + c.items.mu.RLock() + + // Check if cache is empty + if c.items.lru.Len() == 0 { + c.items.mu.RUnlock() + return + } + + for item := c.items.lru.Back(); item != c.items.lru.Front().Prev(); item = item.Prev() { + i := item.Value.(*Item[K, V]) + expired := i.isExpiredUnsafe() + c.items.mu.RUnlock() + + if !expired && !fn(i) { + return + } + + if item.Prev() != nil { + c.items.mu.RLock() + } + } +} + // Metrics returns the metrics of the cache. 
func (c *Cache[K, V]) Metrics() Metrics { c.metricsMu.RLock() diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go index 72341b706..12e58e503 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go @@ -1,6 +1,7 @@ package acr import ( + "github.com/sirupsen/logrus" "time" ) @@ -12,14 +13,14 @@ type Credentials struct { ExpireTime time.Time } -func (c *Client) GetCredentials(serverURL string) (*Credentials, error) { +func (c *Client) GetCredentials(serverURL string, logger *logrus.Logger) (*Credentials, error) { registry, err := parseServerURL(serverURL) if err != nil { return nil, err } if registry.IsEE { - client, err := newEEClient(registry.Region) + client, err := newEEClient(registry.Region, logger) if err != nil { return nil, err } @@ -33,7 +34,7 @@ func (c *Client) GetCredentials(serverURL string) (*Credentials, error) { return client.getCredentials(registry.InstanceId) } - client, err := newPersonClient(registry.Region) + client, err := newPersonClient(registry.Region, logger) if err != nil { return nil, err } diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go index d92095c82..363e2a0b7 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go @@ -2,6 +2,7 @@ package acr import ( "fmt" + "github.com/sirupsen/logrus" "time" cr2018 "github.com/alibabacloud-go/cr-20181201/client" @@ -14,8 +15,8 @@ type eeClient struct { client *cr2018.Client } -func newEEClient(region string) (*eeClient, error) { - cred, err := getOpenapiAuth() +func newEEClient(region string, logger *logrus.Logger) (*eeClient, error) { + cred, err := getOpenapiAuth(logger) if err != nil { return nil, err } @@ -46,11 +47,13 @@ func (c *eeClient) getInstanceId(instanceName string) (string, error) { return "", fmt.Errorf("get ACR EE instance id for name %q failed: %s", instanceName, resp.Body.String()) } instances := resp.Body.Instances - if len(instances) == 0 { - return "", fmt.Errorf("get ACR EE instance id for name %q failed: instance name is not found", instanceName) + for _, item := range instances { + if tea.StringValue(item.InstanceName) == instanceName { + return tea.StringValue(item.InstanceId), nil + } } - return tea.StringValue(instances[0].InstanceId), nil + return "", fmt.Errorf("get ACR EE instance id for name %q failed: instance name is not found", instanceName) } func (c *eeClient) getCredentials(instanceId string) (*Credentials, error) { @@ -73,7 +76,7 @@ func (c *eeClient) getCredentials(instanceId string) (*Credentials, error) { cred := &Credentials{ UserName: tea.StringValue(resp.Body.TempUsername), Password: tea.StringValue(resp.Body.AuthorizationToken), - ExpireTime: expTime, + ExpireTime: expTime.Add(-time.Minute), } return cred, nil } diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go index 696322342..346b74d5e 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go @@ -1,17 +1,26 @@ package acr import ( + 
"github.com/sirupsen/logrus" "os" "path/filepath" + "time" - "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper" + "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider" "github.com/aliyun/credentials-go/credentials" - "github.com/mozillazg/docker-credential-acr-helper/pkg/version" ) var defaultProfilePath = filepath.Join("~", ".alibabacloud", "credentials") -func getOpenapiAuth() (credentials.Credential, error) { +type credentialForV2SDK struct { + *provider.CredentialForV2SDK +} + +type logWrapper struct { + logger *logrus.Logger +} + +func getOpenapiAuth(logger *logrus.Logger) (credentials.Credential, error) { profilePath := defaultProfilePath if os.Getenv(credentials.ENVCredentialFile) != "" { profilePath = os.Getenv(credentials.ENVCredentialFile) @@ -20,18 +29,57 @@ func getOpenapiAuth() (credentials.Credential, error) { if err == nil { if _, err := os.Stat(path); err == nil { _ = os.Setenv(credentials.ENVCredentialFile, path) + return credentials.NewCredential(nil) } } - var conf *credentials.Config - if helper.HaveOidcCredentialRequiredEnv() { - return helper.NewOidcCredential(version.ProjectName) + cp := provider.NewDefaultChainProvider(provider.DefaultChainProviderOptions{ + Logger: &logWrapper{logger: logger}, + }) + cred := &credentialForV2SDK{ + CredentialForV2SDK: provider.NewCredentialForV2SDK(cp, provider.CredentialForV2SDKOptions{ + CredentialRetrievalTimeout: time.Second * 30, + Logger: &logWrapper{logger: logger}, + }), } - cred, err := credentials.NewCredential(conf) return cred, err } +func (c *credentialForV2SDK) GetCredential() (*credentials.CredentialModel, error) { + ak, err := c.GetAccessKeyId() + if err != nil { + return nil, err + } + sk, err := c.GetAccessKeySecret() + if err != nil { + return nil, err + } + token, err := c.GetSecurityToken() + if err != nil { + return nil, err + } + return &credentials.CredentialModel{ + AccessKeyId: ak, + AccessKeySecret: sk, + SecurityToken: token, + BearerToken: nil, + Type: c.GetType(), + }, err +} + +func (l *logWrapper) Info(msg string) { + l.logger.Debug(msg) +} + +func (l *logWrapper) Debug(msg string) { + l.logger.Debug(msg) +} + +func (l *logWrapper) Error(err error, msg string) { + l.logger.WithError(err).Error(msg) +} + func expandPath(path string) (string, error) { if len(path) > 0 && path[0] == '~' { home, err := os.UserHomeDir() diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go index fdf05e1d7..3106245bd 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go @@ -2,6 +2,7 @@ package acr import ( "fmt" + "github.com/sirupsen/logrus" "time" cr2016 "github.com/alibabacloud-go/cr-20160607/client" @@ -15,8 +16,8 @@ type personClient struct { client *cr2016.Client } -func newPersonClient(region string) (*personClient, error) { - cred, err := getOpenapiAuth() +func newPersonClient(region string, logger *logrus.Logger) (*personClient, error) { + cred, err := getOpenapiAuth(logger) if err != nil { return nil, err } diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go index 5bf772ea3..c4f83bf04 100644 --- a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go +++ 
b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go @@ -34,7 +34,7 @@ func (a *ACRHelper) WithLoggerOut(w io.Writer) *ACRHelper { func (a *ACRHelper) Get(serverURL string) (string, string, error) { // TODO: add cache - cred, err := a.client.GetCredentials(serverURL) + cred, err := a.client.GetCredentials(serverURL, a.logger) if err != nil { a.logger.WithField("name", version.ProjectName). WithField("serverURL", serverURL). diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 1d8b69e65..ec52857a3 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d964b25fe..0755e5564 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -565,7 +565,7 @@ complete solutions exist out there. ## Versioning -Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Expect for parts explicitly marked otherwise, go-toml follows [Semantic Versioning](https://semver.org). The supported version of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this document. The last two major versions of Go are supported (see [Go Release diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 7f4e20c12..161acd934 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -8,7 +8,7 @@ import ( "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -280,7 +280,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -631,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -668,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -732,7 +744,7 @@ func walkStruct(ctx encoderCtx, t *table, v 
reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) - } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { walkStruct(ctx, t, f.Elem()) } continue @@ -951,7 +963,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 98231bae6..c3df8bee1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -117,27 +115,25 @@ func (d *Decoder) EnableUnmarshalerInterface() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. 
// If set to true, calling nextExpr will not actually pull a new expression @@ -1078,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } diff --git a/vendor/github.com/planetscale/vtprotobuf/LICENSE b/vendor/github.com/planetscale/vtprotobuf/LICENSE new file mode 100644 index 000000000..dc61de846 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go new file mode 100644 index 000000000..64bde83ed --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go @@ -0,0 +1,122 @@ +// Package protohelpers provides helper functions for encoding and decoding protobuf messages. +// The spec can be found at https://protobuf.dev/programming-guides/encoding/. +package protohelpers + +import ( + "fmt" + "io" + "math/bits" +) + +var ( + // ErrInvalidLength is returned when decoding a negative length. + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + // ErrIntOverflow is returned when decoding a varint representation of an integer that overflows 64 bits. + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + // ErrUnexpectedEndOfGroup is returned when decoding a group end without a corresponding group start. + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) + +// EncodeVarint encodes a uint64 into a varint-encoded byte slice and returns the offset of the encoded value. +// The provided offset is the offset after the last byte of the encoded value. +func EncodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= SizeOfVarint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +// SizeOfVarint returns the size of the varint-encoded value. +func SizeOfVarint(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// SizeOfZigzag returns the size of the zigzag-encoded value. +func SizeOfZigzag(x uint64) (n int) { + return SizeOfVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// Skip the first record of the byte slice and return the offset of the next record. 
+func Skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go new file mode 100644 index 000000000..c99b9e610 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/any.proto + +package anypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + io "io" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Any anypb.Any + +func (m *Any) CloneVT() *Any { + if m == nil { + return (*Any)(nil) + } + r := new(Any) + r.TypeUrl = m.TypeUrl + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *Any) EqualVT(that *Any) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.TypeUrl != that.TypeUrl { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *Any) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *Any) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Any) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.TypeUrl = stringValue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go new file mode 100644 index 000000000..681ae9f72 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/duration.proto + +package durationpb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Duration durationpb.Duration + +func (m *Duration) CloneVT() *Duration { + if m == nil { + return (*Duration)(nil) + } + r := new(Duration) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Duration) EqualVT(that *Duration) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Duration) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- 
+ dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Duration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = 
preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go new file mode 100644 index 000000000..470c82ac5 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/empty.proto + +package emptypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty emptypb.Empty + +func (m *Empty) CloneVT() *Empty { + if m == nil { + return (*Empty)(nil) + } + r := new(Empty) + return r +} + +func (this *Empty) EqualVT(that *Empty) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return true +} + +func (m *Empty) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Empty) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go new file mode 100644 index 000000000..be8b40e33 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go @@ -0,0 +1,2004 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/struct.proto + +package structpb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Struct structpb.Struct +type Value structpb.Value +type Value_NullValue structpb.Value_NullValue +type Value_NumberValue structpb.Value_NumberValue +type Value_StringValue structpb.Value_StringValue +type Value_BoolValue structpb.Value_BoolValue +type Value_StructValue structpb.Value_StructValue +type Value_ListValue structpb.Value_ListValue +type ListValue structpb.ListValue + +func (m *Struct) CloneVT() *Struct { + if m == nil { + return (*Struct)(nil) + } + r := new(Struct) + if rhs := m.Fields; rhs != nil { + tmpContainer := make(map[string]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Fields = tmpContainer + } + return r +} + +func (m *Value) CloneVT() *Value { + if m == nil { + return (*Value)(nil) + } + r := new(Value) + if m.Kind != nil { + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + r.Kind = (*structpb.Value_NullValue)((*Value_NullValue)(c).CloneVT()) + case *structpb.Value_NumberValue: + r.Kind = (*structpb.Value_NumberValue)((*Value_NumberValue)(c).CloneVT()) + case *structpb.Value_StringValue: + r.Kind = (*structpb.Value_StringValue)((*Value_StringValue)(c).CloneVT()) + case *structpb.Value_BoolValue: + r.Kind = (*structpb.Value_BoolValue)((*Value_BoolValue)(c).CloneVT()) + case *structpb.Value_StructValue: + r.Kind = (*structpb.Value_StructValue)((*Value_StructValue)(c).CloneVT()) + case *structpb.Value_ListValue: + r.Kind = (*structpb.Value_ListValue)((*Value_ListValue)(c).CloneVT()) + } + } + return r +} + +func (m *Value_NullValue) CloneVT() *Value_NullValue { + if m == nil { + return (*Value_NullValue)(nil) + } + r := new(Value_NullValue) + r.NullValue = m.NullValue + return r +} + +func (m *Value_NumberValue) CloneVT() *Value_NumberValue { + if m == nil { + return (*Value_NumberValue)(nil) + } + r := new(Value_NumberValue) + r.NumberValue = m.NumberValue + return r +} + +func (m *Value_StringValue) CloneVT() *Value_StringValue { + if m == nil { + return (*Value_StringValue)(nil) + } + r := new(Value_StringValue) + r.StringValue = m.StringValue + return r +} + +func (m *Value_BoolValue) CloneVT() *Value_BoolValue { + if m == nil { + return (*Value_BoolValue)(nil) + } + r := new(Value_BoolValue) + r.BoolValue = m.BoolValue + return r +} + +func (m *Value_StructValue) CloneVT() *Value_StructValue { + if m == nil { + return (*Value_StructValue)(nil) + } + r := new(Value_StructValue) + r.StructValue = (*structpb.Struct)((*Struct)(m.StructValue).CloneVT()) + return r +} + +func (m *Value_ListValue) CloneVT() *Value_ListValue { + if m == nil { + return (*Value_ListValue)(nil) + } + r := new(Value_ListValue) + r.ListValue = (*structpb.ListValue)((*ListValue)(m.ListValue).CloneVT()) + return r +} + +func (m *ListValue) CloneVT() *ListValue { + if m == nil { + return (*ListValue)(nil) + } + r := new(ListValue) + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Values = tmpContainer + } + return r +} + +func (this *Struct) EqualVT(that *Struct) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Fields) != len(that.Fields) { + return false + } + for i, vx := range this.Fields { + vy, ok := that.Fields[i] + if !ok { + return false + } + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = 
&structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (this *Value) EqualVT(that *Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Kind == nil && that.Kind != nil { + return false + } else if this.Kind != nil { + if that.Kind == nil { + return false + } + switch c := this.Kind.(type) { + case *structpb.Value_NullValue: + if !(*Value_NullValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_NumberValue: + if !(*Value_NumberValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StringValue: + if !(*Value_StringValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_BoolValue: + if !(*Value_BoolValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StructValue: + if !(*Value_StructValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_ListValue: + if !(*Value_ListValue)(c).EqualVT(that.Kind) { + return false + } + } + } + return true +} + +func (this *Value_NullValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NullValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NullValue); ok { + that = (*Value_NullValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NullValue != that.NullValue { + return false + } + return true +} + +func (this *Value_NumberValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NumberValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NumberValue); ok { + that = (*Value_NumberValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NumberValue != that.NumberValue { + return false + } + return true +} + +func (this *Value_StringValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StringValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StringValue); ok { + that = (*Value_StringValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.StringValue != that.StringValue { + return false + } + return true +} + +func (this *Value_BoolValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_BoolValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_BoolValue); ok { + that = (*Value_BoolValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.BoolValue != that.BoolValue { + return false + } + return true +} + +func (this *Value_StructValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StructValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StructValue); ok { + that = (*Value_StructValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.StructValue, that.StructValue; p != q { + if p == nil { + p = &structpb.Struct{} + } + if q == nil { + q = &structpb.Struct{} + } + if !(*Struct)(p).EqualVT((*Struct)(q)) { + return false + } + } + return true +} + +func (this *Value_ListValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_ListValue) + if 
!ok { + if ot, ok := thatIface.(*structpb.Value_ListValue); ok { + that = (*Value_ListValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.ListValue, that.ListValue; p != q { + if p == nil { + p = &structpb.ListValue{} + } + if q == nil { + q = &structpb.ListValue{} + } + if !(*ListValue)(p).EqualVT((*ListValue)(q)) { + return false + } + } + return true +} + +func (this *ListValue) EqualVT(that *ListValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Values) != len(that.Values) { + return false + } + for i, vx := range this.Values { + vy := that.Values[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = &structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (m *Struct) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + size, err := (*Value_NullValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_NumberValue: + size, err := (*Value_NumberValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StringValue: + size, err := (*Value_StringValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_BoolValue: + size, err := (*Value_BoolValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StructValue: + size, err := (*Value_StructValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_ListValue: + size, err := (*Value_ListValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, 
nil +} + +func (m *Value_NullValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, 
nil +} + +func (m *Struct) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m, ok := m.Kind.(*structpb.Value_ListValue); ok { + msg := ((*Value_ListValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StructValue); ok { + msg := ((*Value_StructValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_BoolValue); ok { + msg := ((*Value_BoolValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StringValue); ok { + msg := ((*Value_StringValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NumberValue); ok { + msg := ((*Value_NumberValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NullValue); ok { + msg := ((*Value_NullValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Struct) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = (*Value)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + n += (*Value_NullValue)(c).SizeVT() + case *structpb.Value_NumberValue: + n += (*Value_NumberValue)(c).SizeVT() + case *structpb.Value_StringValue: + n += (*Value_StringValue)(c).SizeVT() + case *structpb.Value_BoolValue: + n += (*Value_BoolValue)(c).SizeVT() + case *structpb.Value_StructValue: + n += (*Value_StructValue)(c).SizeVT() + case *structpb.Value_ListValue: + n += (*Value_ListValue)(c).SizeVT() + } + return n +} + +func (m *Value_NullValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Value_BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = (*Struct)(m.StructValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Value_ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = (*ListValue)(m.ListValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = (*Value)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + return n +} + +func (m *Struct) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = 
&structpb.Value_NumberValue{NumberValue: float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &structpb.Value_StringValue{StringValue: string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ListValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Struct) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + if intStringLenmapkey == 0 { + mapkey = "" + } else { + mapkey = unsafe.String(&dAtA[iNdEx], intStringLenmapkey) + } + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVTUnsafe(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &structpb.Value_NumberValue{NumberValue: 
float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Kind = &structpb.Value_StringValue{StringValue: stringValue} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go new file mode 100644 index 000000000..5c63f308f --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/timestamp.proto + +package timestamppb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Timestamp timestamppb.Timestamp + +func (m *Timestamp) CloneVT() *Timestamp { + if m == nil { + return (*Timestamp)(nil) + } + r := new(Timestamp) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Timestamp) EqualVT(that *Timestamp) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Timestamp) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Timestamp) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go new file mode 100644 index 000000000..0dac9b282 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go @@ -0,0 +1,2240 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DoubleValue wrapperspb.DoubleValue +type FloatValue wrapperspb.FloatValue +type Int64Value wrapperspb.Int64Value +type UInt64Value wrapperspb.UInt64Value +type Int32Value wrapperspb.Int32Value +type UInt32Value wrapperspb.UInt32Value +type BoolValue wrapperspb.BoolValue +type StringValue wrapperspb.StringValue +type BytesValue wrapperspb.BytesValue + +func (m *DoubleValue) CloneVT() *DoubleValue { + if m == nil { + return (*DoubleValue)(nil) + } + r := new(DoubleValue) + r.Value = m.Value + return r +} + +func (m *FloatValue) CloneVT() *FloatValue { + if m == nil { + return (*FloatValue)(nil) + } + r := new(FloatValue) + r.Value = m.Value + return r +} + +func (m *Int64Value) CloneVT() *Int64Value { + if m == nil { + return (*Int64Value)(nil) + } + r := new(Int64Value) + r.Value = m.Value + return r +} + +func (m *UInt64Value) CloneVT() *UInt64Value { + if m == nil { + return (*UInt64Value)(nil) + } + r := new(UInt64Value) + r.Value = m.Value + return r +} + +func (m *Int32Value) CloneVT() *Int32Value { + if m == nil { + return (*Int32Value)(nil) + } + r := new(Int32Value) + r.Value = m.Value + return r +} + +func (m *UInt32Value) CloneVT() *UInt32Value { + if m == nil { + return (*UInt32Value)(nil) + } + r := new(UInt32Value) + r.Value = m.Value + return r +} + +func (m *BoolValue) CloneVT() *BoolValue { + if m == nil { + return (*BoolValue)(nil) + } + r := new(BoolValue) + r.Value = m.Value + return r +} + +func (m *StringValue) CloneVT() *StringValue { + if m == nil { + return (*StringValue)(nil) + } + r := new(StringValue) + r.Value = m.Value + return r +} + +func (m *BytesValue) CloneVT() *BytesValue { + if m == nil { + return (*BytesValue)(nil) + } + r := new(BytesValue) + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *DoubleValue) EqualVT(that *DoubleValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *FloatValue) EqualVT(that *FloatValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int64Value) EqualVT(that *Int64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt64Value) EqualVT(that *UInt64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int32Value) EqualVT(that *Int32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt32Value) EqualVT(that *UInt32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BoolValue) EqualVT(that *BoolValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *StringValue) EqualVT(that *StringValue) bool { + if this == that { + 
return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BytesValue) EqualVT(that *BytesValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *DoubleValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVT(dAtA []byte) (int, error) 
{ + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) SizeVT() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *BytesValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *DoubleValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVT(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", 
wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Value = stringValue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index 529603638..1e2fa031b 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -103,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error { switch pub := first.(type) { case *rsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, 
"rsa")) + return errors.New(genErrMsg(first, second, "rsa")) } case *ecdsa.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ecdsa")) + return errors.New(genErrMsg(first, second, "ecdsa")) } case ed25519.PublicKey: if !pub.Equal(second) { - return fmt.Errorf(genErrMsg(first, second, "ed25519")) + return errors.New(genErrMsg(first, second, "ed25519")) } default: return errors.New("unsupported key type") diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index a2b53f8c6..c1b6ef6b7 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -143,6 +143,16 @@ func SubjectFromToken(tok *oidc.IDToken) (string, error) { return subjectFromClaims(claims) } +// SubjectFromUnverifiedToken extracts the subject claim from the raw bytes of +// an OIDC identity token. +func SubjectFromUnverifiedToken(tok []byte) (string, error) { + claims := claims{} + if err := json.Unmarshal(tok, &claims); err != nil { + return "", err + } + return subjectFromClaims(claims) +} + func subjectFromClaims(c claims) (string, error) { if c.Email != "" { if !c.Verified { diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index dfc1f0c0e..6714b3488 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL) fmt.Fprintf(i.GetOutput(), "Enter verification code: ") var code string - fmt.Fscanf(i.GetInput(), "%s", &code) + _, _ = fmt.Fscanf(i.GetInput(), "%s", &code) // New line in case read input doesn't move cursor to next line. 
fmt.Fprintln(i.GetOutput()) return code diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go index 32bb79aac..eee88c031 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go @@ -415,6 +415,9 @@ func (g *gcpClient) createKeyRing(ctx context.Context) error { KeyRingId: g.keyRing, } result, err := g.kmsClient.CreateKeyRing(ctx, createKeyRingRequest) + if err != nil { + return fmt.Errorf("creating keyring: %w", err) + } log.Printf("Created key ring %s in GCP KMS.\n", result.GetName()) - return err + return nil } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go index 20656bc80..77f418f91 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go @@ -19,6 +19,7 @@ package hashivault import ( "context" "crypto" + "crypto/ed25519" "encoding/base64" "encoding/json" "errors" @@ -220,17 +221,30 @@ func (h *hashivaultClient) fetchPublicKey(_ context.Context) (crypto.PublicKey, return nil, fmt.Errorf("could not parse transit key keys data as map[string]interface{}") } - publicKeyPem, ok := keyMap["public_key"] + publicKey, ok := keyMap["public_key"] if !ok { return nil, errors.New("failed to read transit key keys: corrupted response") } - strPublicKeyPem, ok := publicKeyPem.(string) + strPublicKey, ok := publicKey.(string) if !ok { return nil, fmt.Errorf("could not parse public key pem as string") } + // vault returns the key type in the "name" field + if keyType := keyMap["name"]; keyType == "ed25519" { + // vault returns ed25519 public keys as base64 encoding of + // the raw bytes of the key + decodedPublicKey, err := base64.StdEncoding.DecodeString(strPublicKey) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode ed25519 public key: %s", err) + } + if keyLen := len(decodedPublicKey); keyLen != ed25519.PublicKeySize { + return nil, fmt.Errorf("decoded ed25519 public key length is %d, should be %d", keyLen, ed25519.PublicKeySize) + } + return ed25519.PublicKey(decodedPublicKey), nil + } - return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKeyPem)) + return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKey)) } func (h *hashivaultClient) public() (crypto.PublicKey, error) { diff --git a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go index 35c733755..d03973e72 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go @@ -71,6 +71,7 @@ var ( // getRemoteRoot is a var for testing. var getRemoteRoot = func() string { return DefaultRemoteRoot } +// Deprecated: Use https://pkg.go.dev/github.com/sigstore/sigstore-go/pkg/tuf type TUF struct { sync.Mutex client *client.Client diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go index b6ef1ed53..5ff912612 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go @@ -5,30 +5,51 @@ import ( "time" ) -// backoff defines an linear backoff policy. 
-type backoff struct { - InitialDelay time.Duration - MaxDelay time.Duration +// BackoffStrategy provides backoff facilities. +type BackoffStrategy interface { + // NewBackoff returns a new backoff for the strategy. The returned + // Backoff is in the same state that it would be in after a call to + // Reset(). + NewBackoff() Backoff +} + +// Backoff provides backoff for a workload API operation. +type Backoff interface { + // Next returns the next backoff period. + Next() time.Duration + + // Reset() resets the backoff. + Reset() +} + +type defaultBackoffStrategy struct{} + +func (defaultBackoffStrategy) NewBackoff() Backoff { + return newLinearBackoff() +} + +// linearBackoff defines an linear backoff policy. +type linearBackoff struct { + initialDelay time.Duration + maxDelay time.Duration n int } -func newBackoff() *backoff { - return &backoff{ - InitialDelay: time.Second, - MaxDelay: 30 * time.Second, +func newLinearBackoff() *linearBackoff { + return &linearBackoff{ + initialDelay: time.Second, + maxDelay: 30 * time.Second, n: 0, } } -// Duration returns the next wait period for the backoff. Not goroutine-safe. -func (b *backoff) Duration() time.Duration { +func (b *linearBackoff) Next() time.Duration { backoff := float64(b.n) + 1 - d := math.Min(b.InitialDelay.Seconds()*backoff, b.MaxDelay.Seconds()) + d := math.Min(b.initialDelay.Seconds()*backoff, b.maxDelay.Seconds()) b.n++ return time.Duration(d) * time.Second } -// Reset resets the backoff's state. -func (b *backoff) Reset() { +func (b *linearBackoff) Reset() { b.n = 0 } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go index 4d5de5d59..7739798b5 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go @@ -119,7 +119,7 @@ func (c *Client) FetchX509Bundles(ctx context.Context) (*x509bundle.Set, error) // WatchX509Bundles watches for changes to the X.509 bundles. The watcher receives // the updated X.509 bundles. func (c *Client) WatchX509Bundles(ctx context.Context, watcher X509BundleWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchX509Bundles(ctx, watcher, backoff) watcher.OnX509BundlesWatchError(err) @@ -152,7 +152,7 @@ func (c *Client) FetchX509Context(ctx context.Context) (*X509Context, error) { // WatchX509Context watches for updates to the X.509 context. The watcher // receives the updated X.509 context. func (c *Client) WatchX509Context(ctx context.Context, watcher X509ContextWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchX509Context(ctx, watcher, backoff) watcher.OnX509ContextWatchError(err) @@ -224,7 +224,7 @@ func (c *Client) FetchJWTBundles(ctx context.Context) (*jwtbundle.Set, error) { // WatchJWTBundles watches for changes to the JWT bundles. The watcher receives // the updated JWT bundles. func (c *Client) WatchJWTBundles(ctx context.Context, watcher JWTBundleWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchJWTBundles(ctx, watcher, backoff) watcher.OnJWTBundlesWatchError(err) @@ -258,7 +258,7 @@ func (c *Client) newConn(ctx context.Context) (*grpc.ClientConn, error) { return grpc.DialContext(ctx, c.config.address, c.config.dialOptions...) 
//nolint:staticcheck // preserve backcompat with WithDialOptions option } -func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backoff) error { +func (c *Client) handleWatchError(ctx context.Context, err error, backoff Backoff) error { code := status.Code(err) if code == codes.Canceled { return err @@ -270,7 +270,7 @@ func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backo } c.config.log.Errorf("Failed to watch the Workload API: %v", err) - retryAfter := backoff.Duration() + retryAfter := backoff.Next() c.config.log.Debugf("Retrying watch in %s", retryAfter) select { case <-time.After(retryAfter): @@ -281,7 +281,7 @@ func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backo } } -func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatcher, backoff *backoff) error { +func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -308,7 +308,7 @@ func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatche } } -func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, backoff *backoff) error { +func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -335,7 +335,7 @@ func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, } } -func (c *Client) watchX509Bundles(ctx context.Context, watcher X509BundleWatcher, backoff *backoff) error { +func (c *Client) watchX509Bundles(ctx context.Context, watcher X509BundleWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -402,7 +402,8 @@ func withHeader(ctx context.Context) context.Context { func defaultClientConfig() clientConfig { return clientConfig{ - log: logger.Null, + log: logger.Null, + backoffStrategy: defaultBackoffStrategy{}, } } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go index 47ea83ade..112235390 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go @@ -16,6 +16,7 @@ var jwtsourceErr = errs.Class("jwtsource") // Workload API. type JWTSource struct { watcher *watcher + picker func([]*jwtsvid.SVID) *jwtsvid.SVID mtx sync.RWMutex bundles *jwtbundle.Set @@ -33,7 +34,9 @@ func NewJWTSource(ctx context.Context, options ...JWTSourceOption) (_ *JWTSource option.configureJWTSource(config) } - s := &JWTSource{} + s := &JWTSource{ + picker: config.picker, + } s.watcher, err = newWatcher(ctx, config.watcher, nil, s.setJWTBundles) if err != nil { @@ -61,7 +64,22 @@ func (s *JWTSource) FetchJWTSVID(ctx context.Context, params jwtsvid.Params) (*j if err := s.checkClosed(); err != nil { return nil, err } - return s.watcher.client.FetchJWTSVID(ctx, params) + + var ( + svid *jwtsvid.SVID + err error + ) + if s.picker == nil { + svid, err = s.watcher.client.FetchJWTSVID(ctx, params) + } else { + svids, err := s.watcher.client.FetchJWTSVIDs(ctx, params) + if err != nil { + return svid, err + } + svid = s.picker(svids) + } + + return svid, err } // FetchJWTSVIDs fetches all JWT-SVIDs from the source with the given parameters. 
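The jwtsource.go hunk above wires an optional picker into JWTSource: when one is configured, FetchJWTSVID fetches all JWT-SVIDs from the Workload API and lets the picker choose among them, otherwise the single-SVID fetch is used as before. A minimal usage sketch, not part of the vendored diff, assuming the WithDefaultJWTSVIDPicker option added to option.go just below; the SPIFFE ID path and audience strings are illustrative, and the Workload API endpoint is resolved from the environment as usual:

package main

import (
	"context"
	"log"

	"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
	"github.com/spiffe/go-spiffe/v2/workloadapi"
)

func main() {
	ctx := context.Background()

	// Illustrative picker: prefer the SVID whose SPIFFE ID path matches,
	// falling back to the first entry returned by the Workload API.
	picker := func(svids []*jwtsvid.SVID) *jwtsvid.SVID {
		for _, s := range svids {
			if s.ID.Path() == "/workload" {
				return s
			}
		}
		return svids[0]
	}

	// The picker is stored on the JWTSource via the new JWTSourceOption.
	source, err := workloadapi.NewJWTSource(ctx, workloadapi.WithDefaultJWTSVIDPicker(picker))
	if err != nil {
		log.Fatal(err)
	}
	defer source.Close()

	// With a picker configured, FetchJWTSVID goes through FetchJWTSVIDs and
	// returns the picker's choice; without one, behavior is unchanged.
	svid, err := source.FetchJWTSVID(ctx, jwtsvid.Params{Audience: "spiffe://example.org/server"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(svid.ID)
}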
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go index 00cab7d16..f596f30c4 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go @@ -2,6 +2,7 @@ package workloadapi import ( "github.com/spiffe/go-spiffe/v2/logger" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" "github.com/spiffe/go-spiffe/v2/svid/x509svid" "google.golang.org/grpc" ) @@ -35,6 +36,14 @@ func WithLogger(logger logger.Logger) ClientOption { }) } +// WithBackoff provides a custom backoff strategy that replaces the +// default backoff strategy (linear backoff). +func WithBackoffStrategy(backoffStrategy BackoffStrategy) ClientOption { + return clientOption(func(c *clientConfig) { + c.backoffStrategy = backoffStrategy + }) +} + // SourceOption are options that are shared among all option types. type SourceOption interface { configureX509Source(*x509SourceConfig) @@ -60,12 +69,12 @@ type X509SourceOption interface { configureX509Source(*x509SourceConfig) } -// WithDefaultX509SVIDPicker provides a function that is used to determine the -// default X509-SVID when more than one is provided by the Workload API. By -// default, the first X509-SVID in the list returned by the Workload API is +// WithDefaultJWTSVIDPicker provides a function that is used to determine the +// default JWT-SVID when more than one is provided by the Workload API. By +// default, the first JWT-SVID in the list returned by the Workload API is // used. -func WithDefaultX509SVIDPicker(picker func([]*x509svid.SVID) *x509svid.SVID) X509SourceOption { - return withDefaultX509SVIDPicker{picker: picker} +func WithDefaultJWTSVIDPicker(picker func([]*jwtsvid.SVID) *jwtsvid.SVID) JWTSourceOption { + return withDefaultJWTSVIDPicker{picker: picker} } // JWTSourceOption is an option for the JWTSource. A SourceOption is also a @@ -74,6 +83,14 @@ type JWTSourceOption interface { configureJWTSource(*jwtSourceConfig) } +// WithDefaultX509SVIDPicker provides a function that is used to determine the +// default X509-SVID when more than one is provided by the Workload API. By +// default, the first X509-SVID in the list returned by the Workload API is +// used. +func WithDefaultX509SVIDPicker(picker func([]*x509svid.SVID) *x509svid.SVID) X509SourceOption { + return withDefaultX509SVIDPicker{picker: picker} +} + // BundleSourceOption is an option for the BundleSource. A SourceOption is also // a BundleSourceOption. 
type BundleSourceOption interface { @@ -81,10 +98,11 @@ type BundleSourceOption interface { } type clientConfig struct { - address string - namedPipeName string - dialOptions []grpc.DialOption - log logger.Logger + address string + namedPipeName string + dialOptions []grpc.DialOption + log logger.Logger + backoffStrategy BackoffStrategy } type clientOption func(*clientConfig) @@ -100,6 +118,7 @@ type x509SourceConfig struct { type jwtSourceConfig struct { watcher watcherConfig + picker func([]*jwtsvid.SVID) *jwtsvid.SVID } type bundleSourceConfig struct { @@ -145,3 +164,11 @@ type withDefaultX509SVIDPicker struct { func (o withDefaultX509SVIDPicker) configureX509Source(config *x509SourceConfig) { config.picker = o.picker } + +type withDefaultJWTSVIDPicker struct { + picker func([]*jwtsvid.SVID) *jwtsvid.SVID +} + +func (o withDefaultJWTSVIDPicker) configureJWTSource(config *jwtSourceConfig) { + config.picker = o.picker +} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go index b3fcd0948..d73264166 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go @@ -37,6 +37,7 @@ import ( const ( StorageTypeDocDB = "docdb" + mongoEnv = "MONGO_SERVER_URL" ) // ErrNothingToWatch is an error that's returned when the backend doesn't have anything to "watch" @@ -98,34 +99,16 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) return nil, ErrNothingToWatch } - // Set up watcher only when `storage.docdb.mongo-server-url-dir` is set - if cfg.Storage.DocDB.MongoServerURLDir == "" { + pathsToWatch := getPathsToWatch(ctx, cfg) + if len(pathsToWatch) == 0 { return nil, ErrNothingToWatch } - logger.Infof("setting up fsnotify watcher for directory: %s", cfg.Storage.DocDB.MongoServerURLDir) - watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err } - pathsToWatch := []string{ - // mongo-server-url-dir/MONGO_SERVER_URL is where the MONGO_SERVER_URL environment - // variable is expected to be mounted, either manually or via a Kubernetes secret, etc. - filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "MONGO_SERVER_URL"), - // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted - // under path/..data that is then `symlink`ed to the key of the data. In this instance, - // the mounted path is going to look like: - // file 1 - ..2024_05_03_11_23_23.1253599725 - // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 - // file 3 - MONGO_SERVER_URL -> ..data/MONGO_SERVER_URL - // So each time the secret is updated, the file `MONGO_SERVER_URL` is not updated, - // instead the underlying symlink at `..data` is updated and that's what we want to - // capture via the fsnotify event watcher - filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "..data"), - } - backendChan := make(chan *Backend) // Start listening for events. 
go func() { @@ -145,13 +128,23 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) continue } - updatedEnv, err := getMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir) - if err != nil { - logger.Error(err) - backendChan <- nil + var updatedEnv string + if cfg.Storage.DocDB.MongoServerURLPath != "" { + updatedEnv, err = getMongoServerURLFromPath(cfg.Storage.DocDB.MongoServerURLPath) + if err != nil { + logger.Error(err) + backendChan <- nil + } + } else if cfg.Storage.DocDB.MongoServerURLDir != "" { + updatedEnv, err = getMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir) + if err != nil { + logger.Error(err) + backendChan <- nil + } } + if updatedEnv != os.Getenv("MONGO_SERVER_URL") { - logger.Infof("directory %s has been updated, reconfiguring backend...", cfg.Storage.DocDB.MongoServerURLDir) + logger.Info("Mongo server url has been updated, reconfiguring backend...") // Now that MONGO_SERVER_URL has been updated, we should update docdb backend again newDocDBBackend, err := NewStorageBackend(ctx, cfg) @@ -179,11 +172,23 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) } }() - // Add a path. - err = watcher.Add(cfg.Storage.DocDB.MongoServerURLDir) - if err != nil { - return nil, err + if cfg.Storage.DocDB.MongoServerURLPath != "" { + dirPath := filepath.Dir(cfg.Storage.DocDB.MongoServerURLPath) + // Add a path. + err = watcher.Add(dirPath) + if err != nil { + return nil, err + } } + + if cfg.Storage.DocDB.MongoServerURLDir != "" { + // Add a path. + err = watcher.Add(cfg.Storage.DocDB.MongoServerURLDir) + if err != nil { + return nil, err + } + } + return backendChan, nil } @@ -256,27 +261,43 @@ func (b *Backend) retrieveDocuments(ctx context.Context, opts config.StorageOpts } func populateMongoServerURL(ctx context.Context, cfg config.Config) error { - // First preference is given to the key `storage.docdb.mongo-server-url-dir`. + // First preference is given to the key `storage.docdb.mongo-server-url-path`. + // If that doesn't exist, then we move on to `storage.docdb.mongo-server-url-dir`. // If that doesn't exist, then we move on to `storage.docdb.mongo-server-url`. // If that doesn't exist, then we check if `MONGO_SERVER_URL` env var is set. 
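Editor's note: the WatchBackend and populateMongoServerURL changes in this file add a third way to supply the Mongo connection string, a single mounted file named by storage.docdb.mongo-server-url-path, which the precedence comment directly above places ahead of mongo-server-url-dir, mongo-server-url, and the MONGO_SERVER_URL environment variable. A minimal sketch of feeding that key through config.NewConfigFromMap is below; the key names come from the config.go hunk further down, while the URL and file path values are purely illustrative.

package main

import (
	"fmt"

	"github.com/tektoncd/chains/pkg/config"
)

func main() {
	// Illustrative values only; in a cluster this map is normally the data of
	// the Chains config ConfigMap.
	data := map[string]string{
		"storage.docdb.url":                   "mongo://chains/chainsrecords?id_field=name",
		"storage.docdb.mongo-server-url-path": "/etc/chains/mongo/server-url",
	}

	cfg, err := config.NewConfigFromMap(data)
	if err != nil {
		panic(err)
	}

	// MongoServerURLPath is the field added to DocDBStorageConfig in this patch;
	// WatchBackend will watch the ..data symlink next to this file for updates.
	fmt.Println(cfg.Storage.DocDB.MongoServerURLPath)
}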
logger := logging.FromContext(ctx) - mongoEnv := "MONGO_SERVER_URL" + + if cfg.Storage.DocDB.MongoServerURLPath != "" { + logger.Infof("setting %s from storage.docdb.mongo-server-url-path: %s", mongoEnv, cfg.Storage.DocDB.MongoServerURLPath) + mongoServerURL, err := getMongoServerURLFromPath(cfg.Storage.DocDB.MongoServerURLPath) + if err != nil { + return err + } + if err := os.Setenv(mongoEnv, mongoServerURL); err != nil { + return err + } + return nil + } if cfg.Storage.DocDB.MongoServerURLDir != "" { logger.Infof("setting %s from storage.docdb.mongo-server-url-dir: %s", mongoEnv, cfg.Storage.DocDB.MongoServerURLDir) if err := setMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir); err != nil { return err } - } else if cfg.Storage.DocDB.MongoServerURL != "" { + return nil + } + + if cfg.Storage.DocDB.MongoServerURL != "" { logger.Infof("setting %s from storage.docdb.mongo-server-url", mongoEnv) if err := os.Setenv(mongoEnv, cfg.Storage.DocDB.MongoServerURL); err != nil { return err } + return nil } if _, envExists := os.LookupEnv(mongoEnv); !envExists { return fmt.Errorf("mongo docstore configured but %s environment variable not set, "+ - "supply one of storage.docdb.mongo-server-url-dir, storage.docdb.mongo-server-url or set %s", mongoEnv, mongoEnv) + "supply one of storage.docdb.mongo-server-url-path, storage.docdb.mongo-server-url-dir, storage.docdb.mongo-server-url or set %s", mongoEnv, mongoEnv) } return nil @@ -288,7 +309,7 @@ func setMongoServerURLFromDir(dir string) error { return err } - if err = os.Setenv("MONGO_SERVER_URL", fileDataNormalized); err != nil { + if err = os.Setenv(mongoEnv, fileDataNormalized); err != nil { return err } @@ -296,8 +317,6 @@ func setMongoServerURLFromDir(dir string) error { } func getMongoServerURLFromDir(dir string) (string, error) { - mongoEnv := "MONGO_SERVER_URL" - stat, err := os.Stat(dir) if err != nil { if os.IsNotExist(err) { @@ -327,3 +346,62 @@ func getMongoServerURLFromDir(dir string) (string, error) { return fileDataNormalized, nil } + +// getMongoServerUrlFromPath retreives token from the given mount path +func getMongoServerURLFromPath(path string) (string, error) { + fileData, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("reading file in %q: %w", path, err) + } + + // A trailing newline is fairly common in mounted files, so remove it. + fileDataNormalized := strings.TrimSuffix(string(fileData), "\n") + return fileDataNormalized, nil +} + +func getPathsToWatch(ctx context.Context, cfg config.Config) []string { + var pathsToWatch = []string{} + logger := logging.FromContext(ctx) + + if cfg.Storage.DocDB.MongoServerURLPath != "" { + logger.Infof("setting up fsnotify watcher for mongo server url path: %s", cfg.Storage.DocDB.MongoServerURLPath) + dirPath := filepath.Dir(cfg.Storage.DocDB.MongoServerURLPath) + pathsToWatch = []string{ + // mongo-server-url-path/ is where the mongo server url token + // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted + // under path/..data that is then `symlink`ed to the key of the data. 
In this instance, + // the mounted path is going to look like: + // file 1 - ..2024_05_03_11_23_23.1253599725 + // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 + // file 3 - ..data/ + // So each time the secret is updated, the file is not updated, + // instead the underlying symlink at `..data` is updated and that's what we want to + // capture via the fsnotify event watcher + // filepath.Join(cfg.Storage.DocDB.MongoServerURLPath, "..data"), + filepath.Join(dirPath, "..data"), + } + return pathsToWatch + } + + if cfg.Storage.DocDB.MongoServerURLDir != "" { + logger.Infof("setting up fsnotify watcher for directory: %s", cfg.Storage.DocDB.MongoServerURLDir) + pathsToWatch = []string{ + // mongo-server-url-dir/MONGO_SERVER_URL is where the MONGO_SERVER_URL environment + // variable is expected to be mounted, either manually or via a Kubernetes secret, etc. + filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "MONGO_SERVER_URL"), + // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted + // under path/..data that is then `symlink`ed to the key of the data. In this instance, + // the mounted path is going to look like: + // file 1 - ..2024_05_03_11_23_23.1253599725 + // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 + // file 3 - MONGO_SERVER_URL -> ..data/MONGO_SERVER_URL + // So each time the secret is updated, the file `MONGO_SERVER_URL` is not updated, + // instead the underlying symlink at `..data` is updated and that's what we want to + // capture via the fsnotify event watcher + filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "..data"), + } + return pathsToWatch + } + + return pathsToWatch +} diff --git a/vendor/github.com/tektoncd/chains/pkg/config/config.go b/vendor/github.com/tektoncd/chains/pkg/config/config.go index 7175ac2bc..4e9cef101 100644 --- a/vendor/github.com/tektoncd/chains/pkg/config/config.go +++ b/vendor/github.com/tektoncd/chains/pkg/config/config.go @@ -123,9 +123,10 @@ type TektonStorageConfig struct { } type DocDBStorageConfig struct { - URL string - MongoServerURL string - MongoServerURLDir string + URL string + MongoServerURL string + MongoServerURLDir string + MongoServerURLPath string } type GrafeasConfig struct { @@ -168,12 +169,13 @@ const ( ociStorageKey = "artifacts.oci.storage" ociSignerKey = "artifacts.oci.signer" - gcsBucketKey = "storage.gcs.bucket" - ociRepositoryKey = "storage.oci.repository" - ociRepositoryInsecureKey = "storage.oci.repository.insecure" - docDBUrlKey = "storage.docdb.url" - docDBMongoServerURLKey = "storage.docdb.mongo-server-url" - docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" + gcsBucketKey = "storage.gcs.bucket" + ociRepositoryKey = "storage.oci.repository" + ociRepositoryInsecureKey = "storage.oci.repository.insecure" + docDBUrlKey = "storage.docdb.url" + docDBMongoServerURLKey = "storage.docdb.mongo-server-url" + docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" + docDBMongoServerURLPathKey = "storage.docdb.mongo-server-url-path" grafeasProjectIDKey = "storage.grafeas.projectid" grafeasNoteIDKey = "storage.grafeas.noteid" @@ -302,6 +304,7 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { asString(docDBUrlKey, &cfg.Storage.DocDB.URL), asString(docDBMongoServerURLKey, &cfg.Storage.DocDB.MongoServerURL), asString(docDBMongoServerURLDirKey, &cfg.Storage.DocDB.MongoServerURLDir), + asString(docDBMongoServerURLPathKey, &cfg.Storage.DocDB.MongoServerURLPath), asString(grafeasProjectIDKey, &cfg.Storage.Grafeas.ProjectID), 
asString(grafeasNoteIDKey, &cfg.Storage.Grafeas.NoteID), asString(grafeasNoteHint, &cfg.Storage.Grafeas.NoteHint), diff --git a/vendor/github.com/tektoncd/cli/pkg/bundle/reader.go b/vendor/github.com/tektoncd/cli/pkg/bundle/reader.go index ecc312baf..3c7672537 100644 --- a/vendor/github.com/tektoncd/cli/pkg/bundle/reader.go +++ b/vendor/github.com/tektoncd/cli/pkg/bundle/reader.go @@ -116,7 +116,7 @@ func readTarLayer(l v1.Layer) ([]byte, error) { } contents := make([]byte, header.Size) - if _, err := treader.Read(contents); err != nil && err != io.EOF { + if _, err := io.ReadFull(treader, contents); err != nil && err != io.EOF { // We only allow 1 resource per layer so this tar bundle should have one and only one file. return nil, fmt.Errorf("failed to read tar bundle: %w", err) } diff --git a/vendor/github.com/tektoncd/cli/pkg/cmd/customrun/delete.go b/vendor/github.com/tektoncd/cli/pkg/cmd/customrun/delete.go index ea520c129..4eb16c079 100644 --- a/vendor/github.com/tektoncd/cli/pkg/cmd/customrun/delete.go +++ b/vendor/github.com/tektoncd/cli/pkg/cmd/customrun/delete.go @@ -17,42 +17,26 @@ package customrun import ( "context" "fmt" - "os" "github.com/spf13/cobra" "github.com/tektoncd/cli/pkg/actions" "github.com/tektoncd/cli/pkg/cli" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "go.uber.org/multierr" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" ) -func crExists(args []string, p cli.Params) ([]string, error) { - availableCrs := make([]string, 0) - c, err := p.Clients() +func customRunExists(client *cli.Clients, namespace string, crName string) error { + var cr *v1beta1.CustomRun + err := actions.GetV1(customrunGroupResource, client, crName, namespace, metav1.GetOptions{}, &cr) if err != nil { - return availableCrs, err - } - var errorList error - ns := p.Namespace() - for _, name := range args { - var cr *v1beta1.CustomRun - err := actions.GetV1(customrunGroupResource, c, name, ns, metav1.GetOptions{}, &cr) - if err != nil { - errorList = multierr.Append(errorList, err) - if !errors.IsNotFound(err) { - fmt.Fprintf(os.Stderr, "Error checking CustomRun %s in namespace %s: %v\n", name, ns, err) - continue - } - // CustomRun not found, skip to the next - fmt.Fprintf(os.Stderr, "CustomRun %s not found in namespace %s\n", name, ns) - continue + if !errors.IsNotFound(err) { + return err } - availableCrs = append(availableCrs, name) + return fmt.Errorf("CustomRun %s not found in namespace %s", crName, namespace) } - return availableCrs, nil + return nil } func deleteCommand(p cli.Params) *cobra.Command { @@ -100,14 +84,14 @@ func deleteCustomRuns(s *cli.Stream, p cli.Params, crNames []string) error { namespace := p.Namespace() for _, crName := range crNames { // Check if CustomRun exists before attempting deletion - exists, _ := crExists([]string{crName}, p) - if len(exists) == 0 { + err := customRunExists(cs, namespace, crName) + if err != nil { fmt.Fprintf(s.Err, "CustomRun %s not found in namespace %s\n", crName, namespace) continue } // Proceed with deletion - err := deleteCustomRun(cs, namespace, crName) + err = deleteCustomRun(cs, namespace, crName) if err == nil { fmt.Fprintf(s.Out, "CustomRun '%s' deleted successfully from namespace '%s'\n", crName, namespace) } else { diff --git a/vendor/github.com/tektoncd/cli/pkg/export/export.go b/vendor/github.com/tektoncd/cli/pkg/export/export.go index 30ae6d752..8bdee44d6 100644 --- a/vendor/github.com/tektoncd/cli/pkg/export/export.go +++ 
b/vendor/github.com/tektoncd/cli/pkg/export/export.go @@ -19,29 +19,39 @@ import ( ) func RemoveFieldForExport(obj *unstructured.Unstructured) error { + content := obj.UnstructuredContent() + // remove the status from pipelinerun and taskrun - unstructured.RemoveNestedField(obj.UnstructuredContent(), "status") + unstructured.RemoveNestedField(content, "status") // remove some metadata information of previous resource - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "managedFields") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "resourceVersion") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "uid") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "generation") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "namespace") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "creationTimestamp") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "ownerReferences") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration") - _, exist, err := unstructured.NestedString(obj.UnstructuredContent(), "metadata", "generateName") - if err != nil { - return err + metadataFields := []string{ + "managedFields", + "resourceVersion", + "uid", + "finalizers", + "generation", + "namespace", + "creationTimestamp", + "ownerReferences", + } + for _, field := range metadataFields { + unstructured.RemoveNestedField(content, "metadata", field) } - if exist { - unstructured.RemoveNestedField(obj.UnstructuredContent(), "metadata", "name") + unstructured.RemoveNestedField(content, "metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration") + + // check if generateName exists and remove name if it does + if _, exist, err := unstructured.NestedString(content, "metadata", "generateName"); err != nil { + return err + } else if exist { + unstructured.RemoveNestedField(content, "metadata", "name") } // remove the status from spec which are related to status - unstructured.RemoveNestedField(obj.UnstructuredContent(), "spec", "status") - unstructured.RemoveNestedField(obj.UnstructuredContent(), "spec", "statusMessage") + specFields := []string{"status", "statusMessage"} + for _, field := range specFields { + unstructured.RemoveNestedField(content, "spec", field) + } return nil } diff --git a/vendor/github.com/tektoncd/cli/pkg/flags/flags.go b/vendor/github.com/tektoncd/cli/pkg/flags/flags.go index b939b4a95..b08321dcb 100644 --- a/vendor/github.com/tektoncd/cli/pkg/flags/flags.go +++ b/vendor/github.com/tektoncd/cli/pkg/flags/flags.go @@ -135,6 +135,8 @@ func InitParams(p cli.Params, cmd *cobra.Command) error { p.SetNoColour(nocolourFlag) // Make sure we set as Nocolour if we don't have a terminal (ie redirection) + // nolint + // this conversion is throwing error for golangci-lint G115 if !term.IsTerminal(int(os.Stdout.Fd())) { p.SetNoColour(true) } diff --git a/vendor/github.com/tektoncd/cli/pkg/formatted/color.go b/vendor/github.com/tektoncd/cli/pkg/formatted/color.go index 331058079..d4e8cbcd0 100644 --- a/vendor/github.com/tektoncd/cli/pkg/formatted/color.go +++ b/vendor/github.com/tektoncd/cli/pkg/formatted/color.go @@ -115,6 +115,8 @@ type atomicCounter struct { func (c *atomicCounter) next() int { v := atomic.AddUint32(&c.value, 1) next := int(v-1) % c.threshold + // nolint + // this conversion is throwing error for golangci-lint G115 
atomic.CompareAndSwapUint32(&c.value, uint32(c.threshold), 0) return next diff --git a/vendor/github.com/tektoncd/cli/pkg/formatted/k8s.go b/vendor/github.com/tektoncd/cli/pkg/formatted/k8s.go index 00cfb0d43..50829e824 100644 --- a/vendor/github.com/tektoncd/cli/pkg/formatted/k8s.go +++ b/vendor/github.com/tektoncd/cli/pkg/formatted/k8s.go @@ -67,7 +67,9 @@ func Condition(c v1.Conditions) string { status = "Running" } - if c[0].Reason != "" && c[0].Reason != status { + if c[0].Reason == "Completed" && status == "Succeeded" { + return ColorStatus(status) + } else if c[0].Reason != "" && c[0].Reason != status { switch c[0].Reason { case "PipelineRunCancelled", "TaskRunCancelled", "Cancelled": return ColorStatus("Cancelled") + "(" + c[0].Reason + ")" diff --git a/vendor/github.com/tektoncd/cli/pkg/suggestion/suggest.go b/vendor/github.com/tektoncd/cli/pkg/suggestion/suggest.go index 136fc5c9e..97d5989b7 100644 --- a/vendor/github.com/tektoncd/cli/pkg/suggestion/suggest.go +++ b/vendor/github.com/tektoncd/cli/pkg/suggestion/suggest.go @@ -113,14 +113,14 @@ func levenshteinDistance(s, t string, ignoreCase bool) int { if s[i-1] == t[j-1] { d[i][j] = d[i-1][j-1] } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] + minCost := d[i-1][j] + if d[i][j-1] < minCost { + minCost = d[i][j-1] } - if d[i-1][j-1] < min { - min = d[i-1][j-1] + if d[i-1][j-1] < minCost { + minCost = d[i-1][j-1] } - d[i][j] = min + 1 + d[i][j] = minCost + 1 } } diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/client.go b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/client.go index cdf34ec93..242f85e72 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/client.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/client.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog client // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/endpoints.go b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/endpoints.go index db6188a80..f3f491f43 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/endpoints.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/endpoints.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog endpoints // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/service.go b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/service.go index 67bf54ce2..d001e8b55 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/service.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/service.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog service // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/views/view.go b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/views/view.go index 42cf79e20..e017792cb 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/views/view.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/catalog/views/view.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. 
// // catalog views // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/cli.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/cli.go index f7790f055..e1764f0af 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/cli.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/cli.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog HTTP client CLI support package // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/client.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/client.go index 6cbc95925..81e03f6f7 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/client.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/client.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog client HTTP transport // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/encode_decode.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/encode_decode.go index 7de67535c..37a877713 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/encode_decode.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/encode_decode.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog HTTP client encoders and decoders // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/paths.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/paths.go index f239b106e..dcbfd2eca 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/paths.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/paths.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // HTTP request path constructors for the catalog service. // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/types.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/types.go index 02fe08dbd..ea06713e8 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/types.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/catalog/client/types.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // catalog HTTP client types // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/cli.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/cli.go index 1814d457e..adb280273 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/cli.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/cli.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource HTTP client CLI support package // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/client.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/client.go index 611aa20e7..4d9b4ef41 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/client.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/client.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. 
// // resource client HTTP transport // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/encode_decode.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/encode_decode.go index ca4d34822..8687a34a2 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/encode_decode.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/encode_decode.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource HTTP client encoders and decoders // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/paths.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/paths.go index 2546a033d..ee76ef86d 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/paths.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/paths.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // HTTP request path constructors for the resource service. // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/types.go b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/types.go index 743ccac74..4403168f1 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/types.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/http/resource/client/types.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource HTTP client types // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/client.go b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/client.go index 85ca82c8a..bf1fa9cd7 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/client.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/client.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource client // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/endpoints.go b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/endpoints.go index 5251ad21d..51b724623 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/endpoints.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/endpoints.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource endpoints // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/service.go b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/service.go index 4d15f835c..6cccfd87b 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/service.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/service.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. // // resource service // diff --git a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/views/view.go b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/views/view.go index 33989d161..8f5360411 100644 --- a/vendor/github.com/tektoncd/hub/api/v1/gen/resource/views/view.go +++ b/vendor/github.com/tektoncd/hub/api/v1/gen/resource/views/view.go @@ -1,4 +1,4 @@ -// Code generated by goa v3.18.2, DO NOT EDIT. +// Code generated by goa v3.19.1, DO NOT EDIT. 
// // resource views // diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/swagger.json b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/swagger.json index 682fad088..bc517acff 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/swagger.json @@ -174,6 +174,14 @@ "default": "" } }, + "priorityClassName": { + "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext sets the security context for the pod", + "$ref": "#/definitions/v1.PodSecurityContext" + }, "tolerations": { "description": "If specified, the pod's tolerations.", "type": "array", @@ -2639,6 +2647,14 @@ "description": "TaskBreakpoints defines the breakpoint config for a particular Task", "type": "object", "properties": { + "beforeSteps": { + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, "onFailure": { "description": "if enabled, pause TaskRun on failure of a step failed step will not exit", "type": "string" diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go index 3ccb41b5e..1360057a2 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go +++ b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go @@ -31,18 +31,60 @@ type GroupServiceAccount struct { UserName string `json:"username"` } -// CreateServiceAccount create a new service account user for a group. +// ListServiceAccountsOptions represents the available ListServiceAccounts() options. // -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +type ListServiceAccountsOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// ListServiceAccounts gets a list of service acxcounts. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var sa []*GroupServiceAccount + resp, err := s.client.Do(req, &sa) + if err != nil { + return nil, resp, err + } + + return sa, resp, nil +} + +// CreateServiceAccountOptions represents the available CreateServiceAccount() options. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user +type CreateServiceAccountOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} + +// Creates a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user +func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) if err != nil { return nil, nil, err } @@ -60,7 +102,7 @@ func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...Request // CreateServiceAccountPersonalAccessToken() options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user type CreateServiceAccountPersonalAccessTokenOptions struct { Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` @@ -71,7 +113,7 @@ type CreateServiceAccountPersonalAccessTokenOptions struct { // service account user for a group. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { group, err := parseID(gid) if err != nil { @@ -117,3 +159,23 @@ func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, return pat, resp, nil } + +// DeleteServiceAccount Deletes a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user +func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go index 52fd13f6c..34f0cab66 100644 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ b/vendor/github.com/xanzy/go-gitlab/groups.go @@ -393,7 +393,7 @@ type CreateGroupOptions struct { type DefaultBranchProtectionDefaultsOptions struct { AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge.omitempty" json:"allowed_to_merge.omitempty"` + AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go index 6df4eff2b..cc23f265d 100644 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ b/vendor/github.com/xanzy/go-gitlab/projects.go @@ -309,6 +309,7 @@ type ProjectApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` Users []*BasicUser `json:"users"` @@ -1043,6 +1044,44 @@ func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionF return p, resp, nil } +// ListProjectInvidedGroupOptions represents the available +// ListProjectsInvitedGroups() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +type ListProjectInvidedGroupOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` + MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` + Relation *[]string `url:"relation,omitempty" json:"relation,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` +} + +// ListProjectsInvitedGroups lists invited groups of a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pg []*ProjectGroup + resp, err := s.client.Do(req, &pg) + if err != nil { + return nil, resp, err + } + + return pg, resp, nil +} + // UnstarProject unstars a given project. 
// // GitLab API docs: diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go index a7eeb72d3..fcaa71ecc 100644 --- a/vendor/github.com/xanzy/go-gitlab/services.go +++ b/vendor/github.com/xanzy/go-gitlab/services.go @@ -1102,12 +1102,22 @@ type JiraService struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#jira type JiraServiceProperties struct { - URL string `json:"url"` - APIURL string `json:"api_url"` - ProjectKeys []string `json:"project_keys" ` - Username string `json:"username" ` - Password string `json:"password" ` - JiraIssueTransitionID string `json:"jira_issue_transition_id"` + URL string `json:"url"` + APIURL string `json:"api_url"` + Username string `json:"username" ` + Password string `json:"password" ` + Active bool `json:"active"` + JiraAuthType int `json:"jira_auth_type"` + JiraIssuePrefix string `json:"jira_issue_prefix"` + JiraIssueRegex string `json:"jira_issue_regex"` + JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"` + JiraIssueTransitionID string `json:"jira_issue_transition_id"` + CommitEvents bool `json:"commit_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + CommentOnEventEnabled bool `json:"comment_on_event_enabled"` + IssuesEnabled bool `json:"issues_enabled"` + ProjectKeys []string `json:"project_keys" ` + UseInheritedSettings bool `json:"use_inherited_settings"` // Deprecated: This parameter was removed in GitLab 17.0 ProjectKey string `json:"project_key" ` @@ -1152,7 +1162,7 @@ func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOpti if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) if err != nil { @@ -1204,7 +1214,7 @@ func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOpt if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { @@ -1223,7 +1233,7 @@ func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestO if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) if err != nil { diff --git a/vendor/github.com/youmark/pkcs8/.travis.yml b/vendor/github.com/youmark/pkcs8/.travis.yml deleted file mode 100644 index 3608f7dc7..000000000 --- a/vendor/github.com/youmark/pkcs8/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -arch: - - amd64 - - ppc64le -language: go - -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - master - -script: - - go test -v ./... 
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 7e08aab35..fc4a7b1db 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -330,7 +330,7 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade case reflect.Int64: return reflect.ValueOf(i64), nil case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int + if i64 > math.MaxInt { // Can we fit this inside of an int return emptyValue, fmt.Errorf("%d overflows int", i64) } @@ -434,7 +434,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return fmt.Errorf("%d overflows uint64", i64) } case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint return fmt.Errorf("%d overflows uint", i64) } default: diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 852547276..39b07135b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -164,11 +164,15 @@ func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refl return reflect.ValueOf(uint64(i64)), nil case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint return emptyValue, fmt.Errorf("%d overflows uint", i64) } - return reflect.ValueOf(uint(i64)), nil + return reflect.ValueOf(uint(v)), nil default: return emptyValue, ValueDecoderError{ Name: "UintDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 969570424..af6ae7b76 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -95,9 +95,9 @@ func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) { return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t) } - i, err := strconv.ParseInt(val.v.(string), 16, 64) + i, err := strconv.ParseUint(val.v.(string), 16, 8) if err != nil { - return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string)) + return nil, 0, fmt.Errorf("invalid $binary subType string: %q: %w", val.v.(string), err) } subType = byte(i) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index a242bb57c..0e07d5055 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -842,7 +842,7 @@ func (vr *valueReader) peekLength() (int32, error) { } idx := vr.offset - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readLength() (int32, error) { return vr.readi32() } @@ -854,7 +854,7 @@ func (vr 
*valueReader) readi32() (int32, error) { idx := vr.offset vr.offset += 4 - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readu32() (uint32, error) { @@ -864,7 +864,7 @@ func (vr *valueReader) readu32() (uint32, error) { idx := vr.offset vr.offset += 4 - return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil + return binary.LittleEndian.Uint32(vr.d[idx:]), nil } func (vr *valueReader) readi64() (int64, error) { @@ -874,8 +874,7 @@ func (vr *valueReader) readi64() (int64, error) { idx := vr.offset vr.offset += 8 - return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 | - int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil + return int64(binary.LittleEndian.Uint64(vr.d[idx:])), nil } func (vr *valueReader) readu64() (uint64, error) { @@ -885,6 +884,5 @@ func (vr *valueReader) readu64() (uint64, error) { idx := vr.offset vr.offset += 8 - return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 | - uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil + return binary.LittleEndian.Uint64(vr.d[idx:]), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 4d1bfb316..a8088e1e3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -88,8 +88,12 @@ func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval) } -// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext -// instead of the one attached or the default registry. +// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses +// the provided DecodeContext instead of the one attached or the default +// registry. +// +// Deprecated: Use [RawValue.UnmarshalWithRegistry] with a custom registry to customize +// unmarshal behavior instead. func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error { if dc == nil { return ErrNilContext diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index b5b0f3568..d6afb2850 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -10,15 +10,27 @@ import ( "go.mongodb.org/mongo-driver/bson/bsoncodec" ) -// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the -// primitive codecs. +// DefaultRegistry is the default bsoncodec.Registry. It contains the default +// codecs and the primitive codecs. +// +// Deprecated: Use [NewRegistry] to construct a new default registry. To use a +// custom registry when marshaling or unmarshaling, use the "SetRegistry" method +// on an [Encoder] or [Decoder] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Encoder] and [Decoder] for more examples. 
var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. // -// Deprecated: Use NewRegistry instead. +// Deprecated: Use [NewRegistry] instead. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go index c5ff1474b..0a6c1bdca 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go @@ -9,6 +9,7 @@ package logger import ( "encoding/json" "io" + "math" "sync" "time" ) @@ -36,7 +37,11 @@ func NewIOSink(out io.Writer) *IOSink { // Info will write a JSON-encoded message to the io.Writer. func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) { - kvMap := make(map[string]interface{}, len(keysAndValues)/2+2) + mapSize := len(keysAndValues) / 2 + if math.MaxInt-mapSize >= 2 { + mapSize += 2 + } + kvMap := make(map[string]interface{}, mapSize) kvMap[KeyTimestamp] = time.Now().UnixNano() kvMap[KeyMessage] = msg diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index 1bedcc3f8..8d0a2031d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -531,6 +531,12 @@ func (cs *ChangeStream) ID() int64 { return cs.cursor.ID() } +// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent +// call to Next or TryNext will do a network request to fetch the next batch. +func (cs *ChangeStream) RemainingBatchLength() int { + return len(cs.batch) +} + // SetBatchSize sets the number of documents to fetch from the database with // each iteration of the ChangeStream's "Next" or "TryNext" method. This setting // only affects subsequent document batches fetched from the database. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 280749c7d..4266412aa 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -209,10 +209,6 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { clientOpt.SetMaxPoolSize(defaultMaxPoolSize) } - if err != nil { - return nil, err - } - cfg, err := topology.NewConfig(clientOpt, client.clock) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 555035ff5..4cf6fd1a1 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -1200,6 +1200,11 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. 
func (coll *Collection) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (cur *Cursor, err error) { + + if ctx == nil { + ctx = context.Background() + } + // Omit "maxTimeMS" from operations that return a user-managed cursor to // prevent confusing "cursor not found" errors. To maintain existing // behavior for users who set "timeoutMS" with no context deadline, only @@ -1217,10 +1222,6 @@ func (coll *Collection) find( opts ...*options.FindOptions, ) (cur *Cursor, err error) { - if ctx == nil { - ctx = context.Background() - } - f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err @@ -1801,8 +1802,11 @@ func (coll *Collection) Indexes() IndexView { // SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection. func (coll *Collection) SearchIndexes() SearchIndexView { + c, _ := coll.Clone() // Clone() always return a nil error. + c.readConcern = nil + c.writeConcern = nil return SearchIndexView{ - coll: coll, + coll: c, } } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index c5cda9e5b..57c0186ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -580,7 +580,7 @@ func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collection return encryptedFields, nil } -// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. // Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { // Check the EncryptedFieldsMap diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index db5674591..17b373130 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "net/http" "strings" @@ -1177,7 +1178,19 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := make([]byte, 0, len(keyData)+len(certData)+1) + keySize := len(keyData) + if keySize > 64*1024*1024 { + return "", errors.New("X.509 key must be less than 64 MiB") + } + certSize := len(certData) + if certSize > 64*1024*1024 { + return "", errors.New("X.509 certificate must be less than 64 MiB") + } + dataSize := keySize + certSize + 1 + if dataSize > math.MaxInt { + return "", errors.New("size overflow") + } + data := make([]byte, 0, dataSize) data = append(data, keyData...) data = append(data, '\n') data = append(data, certData...) 
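Editor's note: among the mongo-driver changes above, ChangeStream gains RemainingBatchLength, which reports how many documents are still buffered locally before the next Next or TryNext call has to go back to the server. A small sketch of using it inside a watch loop; the connection string, database, and collection names are placeholders.

package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	// Placeholder connection string for illustration.
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("exampledb").Collection("events")

	cs, err := coll.Watch(ctx, mongo.Pipeline{})
	if err != nil {
		log.Fatal(err)
	}
	defer cs.Close(ctx)

	for cs.Next(ctx) {
		log.Printf("event: %s (buffered: %d)", cs.Current, cs.RemainingBatchLength())
		if cs.RemainingBatchLength() == 0 {
			// The next iteration will issue a network request for a new batch.
			log.Println("local batch drained; next call will hit the server")
		}
	}
	if err := cs.Err(); err != nil {
		log.Fatal(err)
	}
}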
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index fd17ce44e..36088c2fc 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -104,7 +104,7 @@ const ( // UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that // was changed. UpdateLookup FullDocument = "updateLookup" - // WhenAvailable includes a post-image of the the modified document for replace and update change events + // WhenAvailable includes a post-image of the modified document for replace and update change events // if the post-image for this event is available. WhenAvailable FullDocument = "whenAvailable" ) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go index 9774d615b..8cb8a08b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go @@ -9,6 +9,7 @@ package options // SearchIndexesOptions represents options that can be used to configure a SearchIndexView. type SearchIndexesOptions struct { Name *string + Type *string } // SearchIndexes creates a new SearchIndexesOptions instance. @@ -22,6 +23,12 @@ func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions { return sio } +// SetType sets the value for the Type field. +func (sio *SearchIndexesOptions) SetType(typ string) *SearchIndexesOptions { + sio.Type = &typ + return sio +} + // CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or // SearchIndexView.CreateMany operation. type CreateSearchIndexesOptions struct { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go index 6a7871531..73fe8534e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -109,6 +108,9 @@ func (siv SearchIndexView) CreateMany( if model.Options != nil && model.Options.Name != nil { indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name) } + if model.Options != nil && model.Options.Type != nil { + indexes = bsoncore.AppendStringElement(indexes, "type", *model.Options.Type) + } indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition) indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx) @@ -134,20 +136,13 @@ func (siv SearchIndexView) CreateMany( return nil, err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewCreateSearchIndexes(indexes). - Session(sess).WriteConcern(wc).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name).CommandMonitor(siv.coll.client.monitor). - Deployment(siv.coll.client.deployment).ServerSelector(selector).ServerAPI(siv.coll.client.serverAPI). 
+ Session(sess).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Collection(siv.coll.name).Database(siv.coll.db.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) err = op.Execute(ctx) @@ -196,20 +191,12 @@ func (siv SearchIndexView) DropOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewDropSearchIndex(name). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) @@ -258,20 +245,12 @@ func (siv SearchIndexView) UpdateOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewUpdateSearchIndex(name, indexDefinition). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index 1d9472ec0..7a73d8d72 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -51,7 +51,7 @@ var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be n type WriteConcern struct { // W requests acknowledgment that the write operation has propagated to a // specified number of mongod instances or to mongod instances with - // specified tags. It sets the the "w" option in a MongoDB write concern. + // specified tags. It sets the "w" option in a MongoDB write concern. // // W values must be a string or an int. // diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 879bbdb7a..d929c4675 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. 
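Aside (illustrative, not part of the vendored patch): the new `SearchIndexesOptions.Type` field and `SetType` setter added above let callers label an Atlas Search index with a type (e.g. "search" or "vectorSearch") when creating it through `SearchIndexView.CreateOne`. A minimal sketch, assuming a locally reachable MongoDB URI and a placeholder index definition:

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	// Placeholder connection string; replace with a real deployment.
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("db").Collection("movies")

	// The Type field is what this vendored driver update introduces; the
	// definition below is a trivial dynamic-mapping example.
	model := mongo.SearchIndexModel{
		Definition: bson.D{{Key: "mappings", Value: bson.D{{Key: "dynamic", Value: true}}}},
		Options:    options.SearchIndexes().SetName("default").SetType("search"),
	}

	name, err := coll.SearchIndexes().CreateOne(ctx, model)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created search index:", name)
}
```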
-var Driver = "v1.15.0" +var Driver = "1.16.1" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 88133293e..03925d7ad 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -8,6 +8,7 @@ package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( "bytes" + "encoding/binary" "fmt" "math" "strconv" @@ -706,17 +707,16 @@ func ReserveLength(dst []byte) (int32, []byte) { // UpdateLength updates the length at index with length and returns the []byte. func UpdateLength(dst []byte, index, length int32) []byte { - dst[index] = byte(length) - dst[index+1] = byte(length >> 8) - dst[index+2] = byte(length >> 16) - dst[index+3] = byte(length >> 24) + binary.LittleEndian.PutUint32(dst[index:], uint32(length)) return dst } func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) } func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(i32)) + return append(dst, b...) } // ReadLength reads an int32 length from src and returns the length and the remaining bytes. If @@ -734,27 +734,26 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return int32(binary.LittleEndian.Uint32(src)), src[4:], true } func appendi64(dst []byte, i64 int64) []byte { - return append(dst, - byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24), - byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(i64)) + return append(dst, b...) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func appendu32(dst []byte, u32 uint32) []byte { - return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, u32) + return append(dst, b...) } func readu32(src []byte) (uint32, []byte, bool) { @@ -762,23 +761,20 @@ func readu32(src []byte) (uint32, []byte, bool) { return 0, src, false } - return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true + return binary.LittleEndian.Uint32(src), src[4:], true } func appendu64(dst []byte, u64 uint64) []byte { - return append(dst, - byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24), - byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, u64) + return append(dst, b...) 
} func readu64(src []byte) (uint64, []byte, bool) { if len(src) < 8 { return 0, src, false } - u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 | - uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56) - return u64, src[8:], true + return binary.LittleEndian.Uint64(src), src[8:], true } // keep in sync with readcstringbytes diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go index 6837b53fc..f68e1da1a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. +// Package bsoncore is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package bsoncore contains functions that can be used to encode and decode +// BSON elements and values to or from a slice of bytes. These functions are +// aimed at allowing low level manipulation of BSON and can be used to build a +// higher level BSON library. // // The Read* functions within this package return the values of the element and // a boolean indicating if the values are valid. A boolean was used instead of @@ -15,15 +23,12 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to +// number of bytes available, it will return false, if there are enough bytes, +// it will return those bytes and true. It is the consumers responsibility to // validate those bytes. // // The Append* functions within this package will append the type value to the // given dst slice. If the slice has enough capacity, it will not grow the // slice. The Append*Element functions within this package operate in the same // way, but additionally append the BSON type and the key before the value. -// -// Warning: Package bsoncore is unstable and there is no backward compatibility -// guarantee. It is experimental and subject to change. package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md deleted file mode 100644 index 3c3e6c56c..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ /dev/null @@ -1,27 +0,0 @@ -# Driver Library Design - -This document outlines the design for this package. - -## Deployment, Server, and Connection - -Acquiring a `Connection` from a `Server` selected from a `Deployment` enables sending and receiving -wire messages. 
A `Deployment` represents an set of MongoDB servers and a `Server` represents a -member of that set. These three types form the operation execution stack. - -### Compression - -Compression is handled by Connection type while uncompression is handled automatically by the -Operation type. This is done because the compressor to use for compressing a wire message is -chosen by the connection during handshake, while uncompression can be performed without this -information. This does make the design of compression non-symmetric, but it makes the design simpler -to implement and more consistent. - -## Operation - -The `Operation` type handles executing a series of commands using a `Deployment`. For most uses -`Operation` will only execute a single command, but the main use case for a series of commands is -batch split write commands, such as insert. The type itself is heavily documented, so reading the -code and comments together should provide an understanding of how the type works. - -This type is not meant to be used directly by callers. Instead a wrapping type should be defined -using the IDL. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go new file mode 100644 index 000000000..99c4c3470 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package creds is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package creds diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go index 9db65cf19..5f9f1f574 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go @@ -4,20 +4,11 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package auth is not for public use. +// Package auth is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. // -// The API for packages in the 'private' directory have no stability -// guarantee. -// -// The packages within the 'private' directory would normally be put into an -// 'internal' directory to prohibit their use outside the 'mongo' directory. -// However, some MongoDB tools require very low-level access to the building -// blocks of a driver, so we have placed them under 'private' to allow these -// packages to be imported by projects that need them. -// -// These package APIs may be modified in backwards-incompatible ways at any -// time. -// -// You are strongly discouraged from directly using any packages -// under 'private'. 
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go index d79b024b7..d9a6c68fe 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go @@ -30,7 +30,11 @@ type CompressionOpts struct { // destination writer. It panics on any errors and should only be used at // package initialization time. func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder { - enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl)) + enc, err := zstd.NewWriter( + nil, + zstd.WithWindowSize(8<<20), // Set window size to 8MB. + zstd.WithEncoderLevel(lvl), + ) if err != nil { panic(err) } @@ -105,6 +109,13 @@ func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) { return dst, nil } +var zstdBufPool = sync.Pool{ + New: func() interface{} { + s := make([]byte, 0) + return &s + }, +} + // CompressPayload takes a byte slice and compresses it according to the options passed func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { switch opts.Compressor { @@ -123,7 +134,13 @@ func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { if err != nil { return nil, err } - return encoder.EncodeAll(in, nil), nil + ptr := zstdBufPool.Get().(*[]byte) + b := encoder.EncodeAll(in, *ptr) + dst := make([]byte, len(b)) + copy(dst, b) + *ptr = b[:0] + zstdBufPool.Put(ptr) + return dst, nil default: return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index 52068b8ea..686458e29 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package connstring is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index 6573a4c1a..9334d493e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package dns is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! 
USE WITH EXTREME CAUTION! package dns import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 5fd3ddcb4..900729bf8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package driver is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index 24f9f9b0e..80f500085 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -7,6 +7,13 @@ //go:build !cse // +build !cse +// Package mongocrypt is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package mongocrypt import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go new file mode 100644 index 000000000..e0cc77052 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package options is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package options diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 870072872..2bff94a65 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package ocsp is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package ocsp import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index eb1acec88..db5367bed 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -622,7 +622,7 @@ func (op Operation) Execute(ctx context.Context) error { } }() for { - // If we're starting a retry and the the error from the previous try was + // If we're starting a retry and the error from the previous try was // a context canceled or deadline exceeded error, stop retrying and // return that error. if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) { @@ -1574,11 +1574,17 @@ func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) // operation's MaxTimeMS if set. If no MaxTimeMS is set on the operation, and context is // not a Timeout context, calculateMaxTimeMS returns 0. func (op Operation) calculateMaxTimeMS(ctx context.Context, mon RTTMonitor) (uint64, error) { - if csot.IsTimeoutContext(ctx) { - if op.OmitCSOTMaxTimeMS { - return 0, nil - } - + // If CSOT is enabled and we're not omitting the CSOT-calculated maxTimeMS + // value, then calculate maxTimeMS. + // + // This allows commands that do not currently send CSOT-calculated maxTimeMS + // (e.g. Find and Aggregate) to still use a manually-provided maxTimeMS + // value. + // + // TODO(GODRIVER-2944): Remove or refactor this logic when we add the + // "timeoutMode" option, which will allow users to opt-in to the + // CSOT-calculated maxTimeMS values if that's the behavior they want. + if csot.IsTimeoutContext(ctx) && !op.OmitCSOTMaxTimeMS { if deadline, ok := ctx.Deadline(); ok { remainingTimeout := time.Until(deadline) rtt90 := mon.P90() @@ -1893,7 +1899,6 @@ func (op Operation) decodeResult(ctx context.Context, opcode wiremessage.OpCode, return nil, errors.New("malformed wire message: insufficient bytes to read single document") } case wiremessage.DocumentSequence: - // TODO(GODRIVER-617): Implement document sequence returns. _, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm) if !ok { return nil, errors.New("malformed wire message: insufficient bytes to read document sequence") @@ -1989,7 +1994,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma } } -// canPublishSucceededEvent returns true if a CommandSucceededEvent can be +// canPublishFinishedEvent returns true if a CommandSucceededEvent can be // published for the given command. This is true if the command is not an // unacknowledged write and the command monitor is monitoring succeeded events. 
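Aside (illustrative sketch, not driver code): the `calculateMaxTimeMS` change above only derives a maxTimeMS value when a CSOT (client-side operation timeout) context is in use and `OmitCSOTMaxTimeMS` is unset; the derived value is the remaining context deadline minus the 90th-percentile RTT. A standalone approximation of that calculation, using only the standard library:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// calcMaxTimeMS subtracts an observed p90 round-trip time from the time
// remaining on the context and converts the result to whole milliseconds,
// rounding up so a small positive remainder still yields at least 1ms.
func calcMaxTimeMS(ctx context.Context, rtt90 time.Duration) (uint64, error) {
	deadline, ok := ctx.Deadline()
	if !ok {
		return 0, nil // no deadline: don't send maxTimeMS at all
	}
	remaining := time.Until(deadline) - rtt90
	if remaining <= 0 {
		return 0, fmt.Errorf("remaining timeout after subtracting rtt90 %v is not enough to run the command", rtt90)
	}
	return uint64((remaining + time.Millisecond - 1) / time.Millisecond), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()

	ms, err := calcMaxTimeMS(ctx, 40*time.Millisecond)
	fmt.Println(ms, err) // roughly 210 <nil>
}
```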
func (op Operation) canPublishFinishedEvent(info finishedInformation) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go index a16f9d716..cb0d80795 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go @@ -15,7 +15,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -23,19 +22,18 @@ import ( // CreateSearchIndexes performs a createSearchIndexes operation. type CreateSearchIndexes struct { - indexes bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result CreateSearchIndexesResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + indexes bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result CreateSearchIndexesResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult. @@ -109,9 +107,15 @@ func (csi *CreateSearchIndexes) Execute(ctx context.Context) error { return driver.Operation{ CommandFn: csi.command, ProcessResponseFn: csi.processResponse, + Client: csi.session, + Clock: csi.clock, CommandMonitor: csi.monitor, + Crypt: csi.crypt, Database: csi.database, Deployment: csi.deployment, + Selector: csi.selector, + ServerAPI: csi.serverAPI, + Timeout: csi.timeout, }.Execute(ctx) } @@ -214,16 +218,6 @@ func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelect return csi } -// WriteConcern sets the write concern for this operation. -func (csi *CreateSearchIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateSearchIndexes { - if csi == nil { - csi = new(CreateSearchIndexes) - } - - csi.writeConcern = writeConcern - return csi -} - // ServerAPI sets the server API version for this operation. func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes { if csi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go new file mode 100644 index 000000000..e55b12a74 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package operation is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package operation diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go index 25cde8154..3992c8316 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,19 +21,18 @@ import ( // DropSearchIndex performs an dropSearchIndex operation. type DropSearchIndex struct { - index string - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result DropSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result DropSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // DropSearchIndexResult represents a dropSearchIndex result returned by the server. @@ -93,7 +91,6 @@ func (dsi *DropSearchIndex) Execute(ctx context.Context) error { Database: dsi.database, Deployment: dsi.deployment, Selector: dsi.selector, - WriteConcern: dsi.writeConcern, ServerAPI: dsi.serverAPI, Timeout: dsi.timeout, }.Execute(ctx) @@ -196,16 +193,6 @@ func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) return dsi } -// WriteConcern sets the write concern for this operation. -func (dsi *DropSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropSearchIndex { - if dsi == nil { - dsi = new(DropSearchIndex) - } - - dsi.writeConcern = writeConcern - return dsi -} - // ServerAPI sets the server API version for this operation. func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex { if dsi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go index ba807986c..64f2da7f6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,20 +21,19 @@ import ( // UpdateSearchIndex performs a updateSearchIndex operation. 
type UpdateSearchIndex struct { - index string - definition bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result UpdateSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + definition bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result UpdateSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // UpdateSearchIndexResult represents a single index in the updateSearchIndexResult result. @@ -95,7 +93,6 @@ func (usi *UpdateSearchIndex) Execute(ctx context.Context) error { Database: usi.database, Deployment: usi.deployment, Selector: usi.selector, - WriteConcern: usi.writeConcern, ServerAPI: usi.serverAPI, Timeout: usi.timeout, }.Execute(ctx) @@ -209,16 +206,6 @@ func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector return usi } -// WriteConcern sets the write concern for this operation. -func (usi *UpdateSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *UpdateSearchIndex { - if usi == nil { - usi = new(UpdateSearchIndex) - } - - usi.writeConcern = writeConcern - return usi -} - // ServerAPI sets the server API version for this operation. func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex { if usi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go new file mode 100644 index 000000000..80b2ac2dd --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package session is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package session diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index 476459e8e..649e87b3d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -320,7 +320,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead // If there was an error and the context was cancelled, we assume it happened due to the cancellation. 
if errors.Is(ctx.Err(), context.Canceled) { - return context.Canceled + return ctx.Err() } // If there was a timeout error and the context deadline was used, we convert the error into @@ -329,7 +329,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead return originalError } if netErr, ok := originalError.(net.Error); ok && netErr.Timeout() { - return context.DeadlineExceeded + return fmt.Errorf("%w: %s", context.DeadlineExceeded, originalError.Error()) } return originalError diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 07f508caa..c7b168dc2 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -56,7 +56,6 @@ type rttMonitor struct { cfg *rttConfig ctx context.Context cancelFn context.CancelFunc - started bool } var _ driver.RTTMonitor = &rttMonitor{} @@ -83,7 +82,6 @@ func (r *rttMonitor) connect() { r.connMu.Lock() defer r.connMu.Unlock() - r.started = true r.closeWg.Add(1) go func() { @@ -97,10 +95,6 @@ func (r *rttMonitor) disconnect() { r.connMu.Lock() defer r.connMu.Unlock() - if !r.started { - return - } - r.cancelFn() // Wait for the existing connection to complete. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index f4c6d744a..a29eea4a6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -125,6 +125,7 @@ type Server struct { processErrorLock sync.Mutex rttMonitor *rttMonitor + monitorOnce sync.Once } // updateTopologyCallback is a callback used to create a server that should be called when the parent Topology instance @@ -285,10 +286,10 @@ func (s *Server) Disconnect(ctx context.Context) error { close(s.done) s.cancelCheck() - s.rttMonitor.disconnect() s.pool.close(ctx) s.closewg.Wait() + s.rttMonitor.disconnect() atomic.StoreInt64(&s.state, serverDisconnected) return nil @@ -661,11 +662,11 @@ func (s *Server) update() { transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil && previousDescription.Kind != description.Unknown - if isStreamingEnabled(s) && isStreamable(s) && !s.rttMonitor.started { - s.rttMonitor.connect() + if isStreamingEnabled(s) && isStreamable(s) { + s.monitorOnce.Do(s.rttMonitor.connect) } - if isStreamable(s) || connectionIsStreaming || transitionedFromNetworkError { + if isStreamingEnabled(s) && (isStreamable(s) || connectionIsStreaming) || transitionedFromNetworkError { continue } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index 3dbbcfb86..0fb913d21 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -4,10 +4,19 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package topology contains types that handles the discovery, monitoring, and selection -// of servers. 
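Aside (illustrative, not the vendored code): the `transformNetworkError` change above wraps `context.DeadlineExceeded` with `%w` rather than returning the bare sentinel, so callers can still match with `errors.Is` while the original network error text is kept for debugging. A tiny sketch of that behavior:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// Stand-in for a net.Error timeout; the message is invented for illustration.
	netErr := errors.New("read tcp 10.0.0.1:52000->10.0.0.2:27017: i/o timeout")

	wrapped := fmt.Errorf("%w: %s", context.DeadlineExceeded, netErr.Error())

	fmt.Println(errors.Is(wrapped, context.DeadlineExceeded)) // true
	fmt.Println(wrapped)                                      // keeps the underlying cause in the message
}
```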
This package is designed to expose enough inner workings of service discovery -// and monitoring to allow low level applications to have fine grained control, while hiding -// most of the detailed implementation of the algorithms. +// Package topology is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package topology contains types that handles the discovery, monitoring, and +// selection of servers. This package is designed to expose enough inner +// workings of service discovery and monitoring to allow low level applications +// to have fine grained control, while hiding most of the detailed +// implementation of the algorithms. package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index abf09c15b..2199f855b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package wiremessage is intended for internal use only. It is made available +// to facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package wiremessage import ( "bytes" + "encoding/binary" "strings" "sync/atomic" @@ -231,10 +239,11 @@ func ReadHeader(src []byte) (length, requestID, responseTo int32, opcode OpCode, if len(src) < 16 { return 0, 0, 0, 0, src, false } - length = (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) - requestID = (int32(src[4]) | int32(src[5])<<8 | int32(src[6])<<16 | int32(src[7])<<24) - responseTo = (int32(src[8]) | int32(src[9])<<8 | int32(src[10])<<16 | int32(src[11])<<24) - opcode = OpCode(int32(src[12]) | int32(src[13])<<8 | int32(src[14])<<16 | int32(src[15])<<24) + + length = readi32unsafe(src) + requestID = readi32unsafe(src[4:]) + responseTo = readi32unsafe(src[8:]) + opcode = OpCode(readi32unsafe(src[12:])) return length, requestID, responseTo, opcode, src[16:], true } @@ -486,7 +495,7 @@ func ReadReplyCursorID(src []byte) (cursorID int64, rem []byte, ok bool) { return readi64(src) } -// ReadReplyStartingFrom reads the starting from from src. +// ReadReplyStartingFrom reads the starting from src. func ReadReplyStartingFrom(src []byte) (startingFrom int32, rem []byte, ok bool) { return readi32(src) } @@ -570,12 +579,16 @@ func ReadKillCursorsCursorIDs(src []byte, numIDs int32) (cursorIDs []int64, rem return cursorIDs, src, true } -func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) +func appendi32(dst []byte, x int32) []byte { + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(x)) + return append(dst, b...) 
} -func appendi64(b []byte, i int64) []byte { - return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) +func appendi64(dst []byte, x int64) []byte { + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(x)) + return append(dst, b...) } func appendCString(b []byte, str string) []byte { @@ -587,21 +600,18 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return readi32unsafe(src), src[4:], true } func readi32unsafe(src []byte) int32 { - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) + return int32(binary.LittleEndian.Uint32(src)) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func readcstring(src []byte) (string, []byte, bool) { diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md new file mode 100644 index 000000000..ec35080b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md @@ -0,0 +1,76 @@ +# GCP Resource detector + +The GCP resource detector supports detecting resources on: + + * Google Compute Engine (GCE) + * Google Kubernetes Engine (GKE) + * Google App Engine (GAE) + * Cloud Run + * Cloud Run jobs + * Cloud Functions + +## Usage + +```golang +ctx := context.Background() +// Detect your resources +res, err := resource.New(ctx, + // Use the GCP resource detector! 
+ resource.WithDetectors(gcp.NewDetector()), + // Keep the default detectors + resource.WithTelemetrySDK(), + // Add your own custom attributes to identify your application + resource.WithAttributes( + semconv.ServiceNameKey.String("my-application"), + semconv.ServiceNamespaceKey.String("my-company-frontend-team"), + ), +) +if err != nil { + // Handle err +} +// Use the resource in your tracerprovider (or meterprovider) +tp := trace.NewTracerProvider( + // ... other options + trace.WithResource(res), +) +``` + +## Setting Kubernetes attributes + +Previous iterations of GCP resource detection attempted to detect +`container.name`, `k8s.pod.name` and `k8s.namespace.name`. When using this detector, +you should use this in your Pod Spec to set these using +[`OTEL_RESOURCE_ATTRIBUTES`](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable): + +```yaml +env: +- name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace +- name: CONTAINER_NAME + value: my-container-name +- name: OTEL_RESOURCE_ATTRIBUTES + value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE_NAME),k8s.container.name=$(CONTAINER_NAME) +``` +To have a detector unpack the `OTEL_RESOURCE_ATTRIBUTES` envvar, use the `WithFromEnv` option: + +```golang +... +// Detect your resources +res, err := resource.New(ctx, + resource.WithDetectors(gcp.NewDetector()), + resource.WithTelemetrySDK(), + resource.WithFromEnv(), // unpacks OTEL_RESOURCE_ATTRIBUTES + // Add your own custom attributes to identify your application + resource.WithAttributes( + semconv.ServiceNameKey.String("my-application"), + semconv.ServiceNamespaceKey.String("my-company-frontend-team"), + ), +) +... +``` diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go new file mode 100644 index 000000000..1c1490b02 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "os" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + gcpFunctionNameKey = "K_SERVICE" +) + +// NewCloudFunction will return a GCP Cloud Function resource detector. +// +// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes. +func NewCloudFunction() resource.Detector { + return &cloudFunction{ + cloudRun: NewCloudRun(), + } +} + +// cloudFunction collects resource information of GCP Cloud Function. +type cloudFunction struct { + cloudRun *CloudRun +} + +// Detect detects associated resources when running in GCP Cloud Function. 
+func (f *cloudFunction) Detect(ctx context.Context) (*resource.Resource, error) { + functionName, ok := f.googleCloudFunctionName() + if !ok { + return nil, nil + } + + projectID, err := f.cloudRun.mc.ProjectID() + if err != nil { + return nil, err + } + region, err := f.cloudRun.cloudRegion() + if err != nil { + return nil, err + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + semconv.CloudPlatformGCPCloudFunctions, + semconv.FaaSName(functionName), + semconv.CloudAccountID(projectID), + semconv.CloudRegion(region), + } + return resource.NewWithAttributes(semconv.SchemaURL, attributes...), nil +} + +func (f *cloudFunction) googleCloudFunctionName() (string, bool) { + return os.LookupEnv(gcpFunctionNameKey) +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go new file mode 100644 index 000000000..7754b4668 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "os" + "strings" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const serviceNamespace = "cloud-run-managed" + +// The minimal list of metadata.Client methods we use. Use an interface so we +// can replace it with a fake implementation in the unit test. +type metadataClient interface { + ProjectID() (string, error) + Get(string) (string, error) + InstanceID() (string, error) +} + +// CloudRun collects resource information of Cloud Run instance. +// +// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes. +type CloudRun struct { + mc metadataClient + onGCE func() bool + getenv func(string) string +} + +// compile time assertion that CloudRun implements the resource.Detector +// interface. +var _ resource.Detector = (*CloudRun)(nil) + +// NewCloudRun creates a CloudRun detector. +// +// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes. +func NewCloudRun() *CloudRun { + return &CloudRun{ + mc: metadata.NewClient(nil), + onGCE: metadata.OnGCE, + getenv: os.Getenv, + } +} + +func (c *CloudRun) cloudRegion() (string, error) { + region, err := c.mc.Get("instance/region") + if err != nil { + return "", err + } + // Region from the metadata server is in the format /projects/123/regions/r. + // https://cloud.google.com/run/docs/reference/container-contract#metadata-server + return region[strings.LastIndex(region, "/")+1:], nil +} + +// Detect detects associated resources when running on Cloud Run hosts. +// NOTE: the service.namespace attribute is currently hardcoded to be +// "cloud-run-managed". This may change in the future, please do not rely on +// this behavior yet. +func (c *CloudRun) Detect(ctx context.Context) (*resource.Resource, error) { + // .OnGCE is actually testing whether the metadata server is available. + // Metadata server is supported on Cloud Run. 
+ if !c.onGCE() { + return nil, nil + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + semconv.ServiceNamespace(serviceNamespace), + } + + var errInfo []string + + if projectID, err := c.mc.ProjectID(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if projectID != "" { + attributes = append(attributes, semconv.CloudAccountID(projectID)) + } + + if region, err := c.cloudRegion(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if region != "" { + attributes = append(attributes, semconv.CloudRegion(region)) + } + + if instanceID, err := c.mc.InstanceID(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if instanceID != "" { + attributes = append(attributes, semconv.ServiceInstanceID(instanceID)) + } + + // Part of Cloud Run container runtime contract. + // See https://cloud.google.com/run/docs/reference/container-contract + if service := c.getenv("K_SERVICE"); service == "" { + errInfo = append(errInfo, "envvar K_SERVICE contains empty string.") + } else { + attributes = append(attributes, semconv.ServiceName(service)) + } + res := resource.NewWithAttributes(semconv.SchemaURL, attributes...) + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting Cloud Run resources: %s", errInfo) + } + + return res, aggregatedErr +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go new file mode 100644 index 000000000..b9eb1e1e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "strconv" + + "cloud.google.com/go/compute/metadata" + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// NewDetector returns a resource detector which detects resource attributes on: +// * Google Compute Engine (GCE). +// * Google Kubernetes Engine (GKE). +// * Google App Engine (GAE). +// * Cloud Run. +// * Cloud Functions. +func NewDetector() resource.Detector { + return &detector{detector: gcp.NewDetector()} +} + +type detector struct { + detector gcpDetector +} + +// Detect detects associated resources when running on GCE, GKE, GAE, +// Cloud Run, and Cloud functions. 
+func (d *detector) Detect(ctx context.Context) (*resource.Resource, error) { + if !metadata.OnGCE() { + return nil, nil + } + b := &resourceBuilder{} + b.attrs = append(b.attrs, semconv.CloudProviderGCP) + b.add(semconv.CloudAccountIDKey, d.detector.ProjectID) + + switch d.detector.CloudPlatform() { + case gcp.GKE: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPKubernetesEngine) + b.addZoneOrRegion(d.detector.GKEAvailabilityZoneOrRegion) + b.add(semconv.K8SClusterNameKey, d.detector.GKEClusterName) + b.add(semconv.HostIDKey, d.detector.GKEHostID) + case gcp.CloudRun: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.CloudRunJob: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.GCPCloudRunJobExecutionKey, d.detector.CloudRunJobExecution) + b.addInt(semconv.GCPCloudRunJobTaskIndexKey, d.detector.CloudRunJobTaskIndex) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.CloudFunctions: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudFunctions) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.AppEngineFlex: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine) + b.addZoneAndRegion(d.detector.AppEngineFlexAvailabilityZoneAndRegion) + b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName) + b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion) + b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance) + case gcp.AppEngineStandard: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine) + b.add(semconv.CloudAvailabilityZoneKey, d.detector.AppEngineStandardAvailabilityZone) + b.add(semconv.CloudRegionKey, d.detector.AppEngineStandardCloudRegion) + b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName) + b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion) + b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance) + case gcp.GCE: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPComputeEngine) + b.addZoneAndRegion(d.detector.GCEAvailabilityZoneAndRegion) + b.add(semconv.HostTypeKey, d.detector.GCEHostType) + b.add(semconv.HostIDKey, d.detector.GCEHostID) + b.add(semconv.HostNameKey, d.detector.GCEHostName) + b.add(semconv.GCPGceInstanceNameKey, d.detector.GCEInstanceName) + b.add(semconv.GCPGceInstanceHostnameKey, d.detector.GCEInstanceHostname) + default: + // We don't support this platform yet, so just return with what we have + } + return b.build() +} + +// resourceBuilder simplifies constructing resources using GCP detection +// library functions. 
+type resourceBuilder struct { + errs []error + attrs []attribute.KeyValue +} + +func (r *resourceBuilder) add(key attribute.Key, detect func() (string, error)) { + if v, err := detect(); err == nil { + r.attrs = append(r.attrs, key.String(v)) + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) addInt(key attribute.Key, detect func() (string, error)) { + if v, err := detect(); err == nil { + if vi, err := strconv.Atoi(v); err == nil { + r.attrs = append(r.attrs, key.Int(vi)) + } else { + r.errs = append(r.errs, err) + } + } else { + r.errs = append(r.errs, err) + } +} + +// zoneAndRegion functions are expected to return zone, region, err. +func (r *resourceBuilder) addZoneAndRegion(detect func() (string, string, error)) { + if zone, region, err := detect(); err == nil { + r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(zone)) + r.attrs = append(r.attrs, semconv.CloudRegion(region)) + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) addZoneOrRegion(detect func() (string, gcp.LocationType, error)) { + if v, locType, err := detect(); err == nil { + switch locType { + case gcp.Zone: + r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(v)) + case gcp.Region: + r.attrs = append(r.attrs, semconv.CloudRegion(v)) + default: + r.errs = append(r.errs, fmt.Errorf("location must be zone or region. Got %v", locType)) + } + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) build() (*resource.Resource, error) { + var err error + if len(r.errs) > 0 { + err = fmt.Errorf("%w: %s", resource.ErrPartialResource, r.errs) + } + return resource.NewWithAttributes(semconv.SchemaURL, r.attrs...), err +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go new file mode 100644 index 000000000..2a29c420b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// GCE collects resource information of GCE computing instances. +// +// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes on GCE. +type GCE struct{} + +// compile time assertion that GCE implements the resource.Detector interface. +var _ resource.Detector = (*GCE)(nil) + +// Detect detects associated resources when running on GCE hosts. 
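The `build` method above wraps per-attribute detection failures in `resource.ErrPartialResource`, so a caller can keep whatever attributes were successfully detected. A minimal sketch of tolerating a partial result when invoking the detector directly (the function and log messages are illustrative, not part of the library):

```golang
package main

import (
	"context"
	"errors"
	"log"

	"go.opentelemetry.io/contrib/detectors/gcp"
	"go.opentelemetry.io/otel/sdk/resource"
)

func detectGCP(ctx context.Context) *resource.Resource {
	res, err := gcp.NewDetector().Detect(ctx)
	switch {
	case err == nil:
		// Full detection succeeded. Off-GCP, Detect returns nil, nil, so res may be nil here.
	case errors.Is(err, resource.ErrPartialResource):
		// Some attributes could not be read; res still carries the ones that were found.
		log.Printf("partial GCP resource: %v", err)
	default:
		log.Printf("GCP resource detection failed: %v", err)
	}
	return res
}
```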
+func (gce *GCE) Detect(ctx context.Context) (*resource.Resource, error) { + if !metadata.OnGCE() { + return nil, nil + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + } + + var errInfo []string + + if projectID, err := metadata.ProjectIDWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if projectID != "" { + attributes = append(attributes, semconv.CloudAccountID(projectID)) + } + + if zone, err := metadata.ZoneWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if zone != "" { + attributes = append(attributes, semconv.CloudAvailabilityZone(zone)) + + splitArr := strings.SplitN(zone, "-", 3) + if len(splitArr) == 3 { + attributes = append(attributes, semconv.CloudRegion(strings.Join(splitArr[0:2], "-"))) + } + } + + if instanceID, err := metadata.InstanceIDWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if instanceID != "" { + attributes = append(attributes, semconv.HostID(instanceID)) + } + + if name, err := metadata.InstanceNameWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if name != "" { + attributes = append(attributes, semconv.HostName(name)) + } + + if hostname, err := os.Hostname(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if hostname != "" { + attributes = append(attributes, semconv.HostName(hostname)) + } + + if hostType, err := metadata.GetWithContext(ctx, "instance/machine-type"); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if hostType != "" { + attributes = append(attributes, semconv.HostType(hostType)) + } + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting GCE resources: %s", errInfo) + } + + return resource.NewWithAttributes(semconv.SchemaURL, attributes...), aggregatedErr +} + +// hasProblem checks if the err is not nil or for missing resources. +func hasProblem(err error) bool { + if err == nil { + return false + } + + var nde metadata.NotDefinedError + if undefined := errors.As(err, &nde); undefined { + return false + } + return true +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go new file mode 100644 index 000000000..0588ad6a4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "os" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// GKE collects resource information of GKE computing instances. +// +// Deprecated: Use gcp.NewDetector() instead, which does NOT detect container, pod, and namespace attributes. +// Set those using name using the OTEL_RESOURCE_ATTRIBUTES env var instead. +type GKE struct{} + +// compile time assertion that GKE implements the resource.Detector interface. +var _ resource.Detector = (*GKE)(nil) + +// Detect detects associated resources when running in GKE environment. 
+func (gke *GKE) Detect(ctx context.Context) (*resource.Resource, error) { + gcpDetecor := GCE{} + gceLablRes, err := gcpDetecor.Detect(ctx) + + if os.Getenv("KUBERNETES_SERVICE_HOST") == "" { + return gceLablRes, err + } + + var errInfo []string + if err != nil { + errInfo = append(errInfo, err.Error()) + } + + attributes := []attribute.KeyValue{ + semconv.K8SNamespaceName(os.Getenv("NAMESPACE")), + semconv.K8SPodName(os.Getenv("HOSTNAME")), + } + + if containerName := os.Getenv("CONTAINER_NAME"); containerName != "" { + attributes = append(attributes, semconv.ContainerName(containerName)) + } + + if clusterName, err := metadata.InstanceAttributeValueWithContext(ctx, "cluster-name"); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if clusterName != "" { + attributes = append(attributes, semconv.K8SClusterName(clusterName)) + } + + k8sattributeRes := resource.NewWithAttributes(semconv.SchemaURL, attributes...) + + res, err := resource.Merge(gceLablRes, k8sattributeRes) + if err != nil { + errInfo = append(errInfo, err.Error()) + } + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting GKE resources: %s", errInfo) + } + + return res, aggregatedErr +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go new file mode 100644 index 000000000..666d82e61 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp" + +// gcpDetector can detect attributes of GCP environments. +type gcpDetector interface { + ProjectID() (string, error) + CloudPlatform() gcp.Platform + GKEAvailabilityZoneOrRegion() (string, gcp.LocationType, error) + GKEClusterName() (string, error) + GKEHostID() (string, error) + FaaSName() (string, error) + FaaSVersion() (string, error) + FaaSID() (string, error) + FaaSCloudRegion() (string, error) + AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) + AppEngineStandardAvailabilityZone() (string, error) + AppEngineStandardCloudRegion() (string, error) + AppEngineServiceName() (string, error) + AppEngineServiceVersion() (string, error) + AppEngineServiceInstance() (string, error) + GCEAvailabilityZoneAndRegion() (string, string, error) + GCEHostType() (string, error) + GCEHostID() (string, error) + GCEHostName() (string, error) + GCEInstanceHostname() (string, error) + GCEInstanceName() (string, error) + CloudRunJobExecution() (string, error) + CloudRunJobTaskIndex() (string, error) +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go new file mode 100644 index 000000000..1acc89831 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +// Version is the current release version of the GCP resource detector. +func Version() string { + return "1.29.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. 
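As the deprecation note says, `Version` is the function to reach for now. A small sketch of the usual pattern of supplying it when acquiring a tracer (the scope name below is illustrative):

```golang
package main

import (
	"go.opentelemetry.io/contrib/detectors/gcp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func newTracer() trace.Tracer {
	// Record the detector's release version as the instrumentation version.
	return otel.Tracer(
		"example.com/my-app", // illustrative scope name
		trace.WithInstrumentationVersion(gcp.Version()),
	)
}
```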
+func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index ab091cf6a..18436eaed 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 201867a86..fbcbfb84e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index a15d06cb0..04f425edf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC 
instrumentation. func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf58..5d6e6156b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f0a9bb9ef..a01bfafbe 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
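Together with the new `otelgrpc.WithSpanAttributes`/`WithMetricAttributes` options added above, this gives callers two ways to attach extra telemetry attributes. A hedged sketch of both (the attribute keys and values are illustrative and not defined by either library):

```golang
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
	"google.golang.org/grpc"
)

func main() {
	// gRPC server: constant attributes added to every span and metric point.
	_ = grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler(
		otelgrpc.WithSpanAttributes(attribute.String("tenant", "example")),   // illustrative key
		otelgrpc.WithMetricAttributes(attribute.String("tenant", "example")), // illustrative key
	)))

	// HTTP client: per-request metric attributes derived from the outgoing request.
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport,
			otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
				return []attribute.KeyValue{attribute.String("peer.host", r.URL.Hostname())}
			}),
		),
	}
	_ = client
}
```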
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index d01bdccf4..33580a35b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,11 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -24,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -34,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) 
h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -96,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) - h.responseBytesCounter.Add(ctx, rww.written, addOpts...) - // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 000000000..a945f5566 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 000000000..aea171fb2 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. 
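Since `RecordMetrics` above folds `labeler.Get()` into the metric attributes, handlers wrapped by `otelhttp.NewHandler` can add per-request attributes through the `Labeler`, alongside `WithRouteTag` for the route name. A minimal sketch (route, operation name, and attribute values are illustrative):

```golang
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func newHandler() http.Handler {
	users := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Attributes added here end up on this request's metrics.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("user.tier", "free")) // illustrative attribute
		w.WriteHeader(http.StatusOK)
	})

	mux := http.NewServeMux()
	// Annotate this route's spans and metrics with the http.route attribute.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id", users))

	// NewHandler injects the Labeler into the request context and records the metrics.
	return otelhttp.NewHandler(mux, "http.server")
}
```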
+type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. 
+func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 3ec0ad00c..9cae4cab8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -4,6 +4,7 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" "os" @@ -11,6 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) type ResponseTelemetry struct { @@ -23,6 +25,11 @@ type ResponseTelemetry struct { type HTTPServer struct { duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue { return oldHTTPServer{}.Route(route) } -func NewHTTPServer() HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) - return HTTPServer{duplicate: env == "http/dup"} -} - -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) { } return codes.Unset, "" } + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) 
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go similarity index 57% rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 0c5d4c460..745b8c67b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -4,11 +4,14 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "fmt" "net/http" + "reflect" + "strconv" "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type newHTTPServer struct{} @@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke func (n newHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. 
+ + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e7f293761..e6e14924f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -9,8 +9,9 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. } func requiredHTTPPort(https bool, port int) int { // nolint:revive @@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{ http.MethodPut: semconvNew.HTTPRequestMethodPut, http.MethodTrace: semconvNew.HTTPRequestMethodTrace, } + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c3e838aaa..c999b05e6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -7,9 +7,13 @@ import ( "errors" "io" "net/http" + "slices" + "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) @@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. 
+const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index a9a9226b3..b80a1db61 100644 --- 
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. } func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 0d3cb2e4a..b4119d343 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,13 +11,15 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) @@ -26,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) 
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, addOpts...) + t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index b0957f28c..502c1bdaf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 948f8406c..000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *respWriterWrapper) Flush() { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b649..d9abe194d 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e..6107c17b8 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. 
(#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +234,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter. 
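Among the 1.29.0 additions listed above is a `Walk` method on `TraceState` in `go.opentelemetry.io/otel/trace` (#5651). A small, hedged sketch of iterating the key-value pairs with it, assuming the conventional `func(key, value string) bool` visitor signature; the tracestate value is made up for illustration:

```go
// Hedged sketch of trace.TraceState.Walk, added in otel v1.29.0 (#5651).
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("vendorA=value1,vendorB=value2")
	if err != nil {
		panic(err)
	}

	// Walk visits each list-member in order; returning false stops early.
	ts.Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		return true
	})
}
```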
- CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 202554933..5904bb707 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,7 +5,7 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58..b7402576f 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -650,7 +650,7 @@ should be canceled. ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76..070b1e57d 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. 
.PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a8909317..657df3471 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d..59992984d 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc..b3569e95e 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. 
func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. 
+// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. 
+// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6..2acbac354 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c59501..921f85961 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. 
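The baggage changes above relax in-memory keys and values to UTF-8 while keeping the W3C wire format strict: parsing replaces invalid percent-encoded octets with '�', and `String()` silently drops members whose keys cannot be encoded as W3C tokens. A hedged sketch using only the public constructors and `Parse`; the key, value, and header strings are illustrative:

```go
// Hedged sketch of the relaxed baggage behavior described above.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw now only requires a valid, non-empty UTF-8 key and a
	// valid UTF-8 value; the value is percent-encoded by String().
	m, err := baggage.NewMemberRaw("userName", "Alice Smith")
	if err != nil {
		panic(err)
	}
	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	fmt.Println("header:", b.String()) // e.g. userName=Alice%20Smith

	// Parsing replaces invalid percent-encoded octets with U+FFFD ('�')
	// instead of failing, per the W3C baggage guidance referenced above.
	parsed, err := baggage.Parse("key=%FFvalue")
	if err != nil {
		panic(err)
	}
	fmt.Println("value:", parsed.Member("key").Value()) // �value
}
```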
To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b35..9b1da2c02 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e01..14e08c24a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. 
The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 000000000..f81b1576a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 000000000..06e6d8685 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 000000000..a4faa6a03 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 000000000..f2cdf3c65 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// +// Deprecated: use [Scope] instead. 
+type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 000000000..728115045 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 000000000..fab61647c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable. +The value set must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 000000000..68d296cbe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. 
+// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md new file mode 100644 index 000000000..017f072a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md @@ -0,0 +1,3 @@ +# Metric SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go new file mode 100644 index 000000000..e6f5cfb2a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "fmt" + "slices" +) + +// errAgg is wrapped by misconfigured aggregations. 
+var errAgg = errors.New("aggregation") + +// Aggregation is the aggregation used to summarize recorded measurements. +type Aggregation interface { + // copy returns a deep copy of the Aggregation. + copy() Aggregation + + // err returns an error for any misconfigured Aggregation. + err() error +} + +// AggregationDrop is an Aggregation that drops all recorded data. +type AggregationDrop struct{} // AggregationDrop has no parameters. + +var _ Aggregation = AggregationDrop{} + +// copy returns a deep copy of d. +func (d AggregationDrop) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A drop aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDrop) err() error { return nil } + +// AggregationDefault is an Aggregation that uses the default instrument kind selection +// mapping to select another Aggregation. A metric reader can be configured to +// make an aggregation selection based on instrument kind that differs from +// the default. This Aggregation ensures the default is used. +// +// See the [DefaultAggregationSelector] for information about the default +// instrument kind selection mapping. +type AggregationDefault struct{} // AggregationDefault has no parameters. + +var _ Aggregation = AggregationDefault{} + +// copy returns a deep copy of d. +func (d AggregationDefault) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A default aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDefault) err() error { return nil } + +// AggregationSum is an Aggregation that summarizes a set of measurements as their +// arithmetic sum. +type AggregationSum struct{} // AggregationSum has no parameters. + +var _ Aggregation = AggregationSum{} + +// copy returns a deep copy of s. +func (s AggregationSum) copy() Aggregation { return s } + +// err returns an error for any misconfiguration. A sum aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationSum) err() error { return nil } + +// AggregationLastValue is an Aggregation that summarizes a set of measurements as the +// last one made. +type AggregationLastValue struct{} // AggregationLastValue has no parameters. + +var _ Aggregation = AggregationLastValue{} + +// copy returns a deep copy of l. +func (l AggregationLastValue) copy() Aggregation { return l } + +// err returns an error for any misconfiguration. A last-value aggregation has +// no parameters and cannot be misconfigured, therefore this always returns +// nil. +func (AggregationLastValue) err() error { return nil } + +// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with explicitly defined buckets. +type AggregationExplicitBucketHistogram struct { + // Boundaries are the increasing bucket boundary values. Boundary values + // define bucket upper bounds. Buckets are exclusive of their lower + // boundary and inclusive of their upper bound (except at positive + // infinity). A measurement is defined to fall into the greatest-numbered + // bucket with a boundary that is greater than or equal to the + // measurement. 
As an example, boundaries defined as: + // + // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000} + // + // Will define these buckets: + // + // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0], + // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0], + // (500.0, 1000.0], (1000.0, +∞) + Boundaries []float64 + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. It is recommended to set this to true + // for that type of data to avoid computing the low-value extrema. + NoMinMax bool +} + +var _ Aggregation = AggregationExplicitBucketHistogram{} + +// errHist is returned by misconfigured ExplicitBucketHistograms. +var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg) + +// err returns an error for any misconfiguration. +func (h AggregationExplicitBucketHistogram) err() error { + if len(h.Boundaries) <= 1 { + return nil + } + + // Check boundaries are monotonic. + i := h.Boundaries[0] + for _, j := range h.Boundaries[1:] { + if i >= j { + return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries) + } + i = j + } + + return nil +} + +// copy returns a deep copy of h. +func (h AggregationExplicitBucketHistogram) copy() Aggregation { + return AggregationExplicitBucketHistogram{ + Boundaries: slices.Clone(h.Boundaries), + NoMinMax: h.NoMinMax, + } +} + +// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with bucket widths that grow exponentially. +type AggregationBase2ExponentialHistogram struct { + // MaxSize is the maximum number of buckets to use for the histogram. + MaxSize int32 + // MaxScale is the maximum resolution scale to use for the histogram. + // + // MaxScale has a maximum value of 20. Using a value of 20 means the + // maximum number of buckets that can fit within the range of a + // signed 32-bit integer index could be used. + // + // MaxScale has a minimum value of -10. Using a value of -10 means only + // two buckets will be used. + MaxScale int32 + + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. It is recommended to set this to true + // for that type of data to avoid computing the low-value extrema. + NoMinMax bool +} + +var _ Aggregation = AggregationBase2ExponentialHistogram{} + +// copy returns a deep copy of the Aggregation. +func (e AggregationBase2ExponentialHistogram) copy() Aggregation { + return e +} + +const ( + expoMaxScale = 20 + expoMinScale = -10 +) + +// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms. +var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg) + +// err returns an error for any misconfigured Aggregation. 
+func (e AggregationBase2ExponentialHistogram) err() error { + if e.MaxScale > expoMaxScale { + return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale) + } + if e.MaxSize <= 0 { + return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go new file mode 100644 index 000000000..63b88f086 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "sync" +) + +// cache is a locking storage used to quickly return already computed values. +// +// The zero value of a cache is empty and ready to use. +// +// A cache must not be copied after first use. +// +// All methods of a cache are safe to call concurrently. +type cache[K comparable, V any] struct { + sync.Mutex + data map[K]V +} + +// Lookup returns the value stored in the cache with the associated key if it +// exists. Otherwise, f is called and its returned value is set in the cache +// for key and returned. +// +// Lookup is safe to call concurrently. It will hold the cache lock, so f +// should not block excessively. +func (c *cache[K, V]) Lookup(key K, f func() V) V { + c.Lock() + defer c.Unlock() + + if c.data == nil { + val := f() + c.data = map[K]V{key: val} + return val + } + if v, ok := c.data[key]; ok { + return v + } + val := f() + c.data[key] = val + return val +} + +// HasKey returns true if Lookup has previously been called with that key +// +// HasKey is safe to call concurrently. +func (c *cache[K, V]) HasKey(key K) bool { + c.Lock() + defer c.Unlock() + _, ok := c.data[key] + return ok +} + +// cacheWithErr is a locking storage used to quickly return already computed values and an error. +// +// The zero value of a cacheWithErr is empty and ready to use. +// +// A cacheWithErr must not be copied after first use. +// +// All methods of a cacheWithErr are safe to call concurrently. +type cacheWithErr[K comparable, V any] struct { + cache[K, valAndErr[V]] +} + +type valAndErr[V any] struct { + val V + err error +} + +// Lookup returns the value stored in the cacheWithErr with the associated key +// if it exists. Otherwise, f is called and its returned value is set in the +// cacheWithErr for key and returned. +// +// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f +// should not block excessively. +func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) { + combined := c.cache.Lookup(key, func() valAndErr[V] { + val, err := f() + return valAndErr[V]{val: val, err: err} + }) + return combined.val, combined.err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go new file mode 100644 index 000000000..bbe7bf671 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel/sdk/resource" +) + +// config contains configuration options for a MeterProvider. 
+type config struct { + res *resource.Resource + readers []Reader + views []View +} + +// readerSignals returns a force-flush and shutdown function for a +// MeterProvider to call in their respective options. All Readers c contains +// will have their force-flush and shutdown methods unified into returned +// single functions. +func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) { + var fFuncs, sFuncs []func(context.Context) error + for _, r := range c.readers { + sFuncs = append(sFuncs, r.Shutdown) + if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok { + fFuncs = append(fFuncs, f.ForceFlush) + } + } + + return unify(fFuncs), unifyShutdown(sFuncs) +} + +// unify unifies calling all of funcs into a single function call. All errors +// returned from calls to funcs will be unify into a single error return +// value. +func unify(funcs []func(context.Context) error) func(context.Context) error { + return func(ctx context.Context) error { + var errs []error + for _, f := range funcs { + if err := f(ctx); err != nil { + errs = append(errs, err) + } + } + return unifyErrors(errs) + } +} + +// unifyErrors combines multiple errors into a single error. +func unifyErrors(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return fmt.Errorf("%v", errs) + } +} + +// unifyShutdown unifies calling all of funcs once for a shutdown. If called +// more than once, an ErrReaderShutdown error is returned. +func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error { + f := unify(funcs) + var once sync.Once + return func(ctx context.Context) error { + err := ErrReaderShutdown + once.Do(func() { err = f(ctx) }) + return err + } +} + +// newConfig returns a config configured with options. +func newConfig(options []Option) config { + conf := config{res: resource.Default()} + for _, o := range options { + conf = o.apply(conf) + } + return conf +} + +// Option applies a configuration option value to a MeterProvider. +type Option interface { + apply(config) config +} + +// optionFunc applies a set of options to a config. +type optionFunc func(config) config + +// apply returns a config with option(s) applied. +func (o optionFunc) apply(conf config) config { + return o(conf) +} + +// WithResource associates a Resource with a MeterProvider. This Resource +// represents the entity producing telemetry and is associated with all Meters +// the MeterProvider will create. +// +// By default, if this Option is not used, the default Resource from the +// go.opentelemetry.io/otel/sdk/resource package will be used. +func WithResource(res *resource.Resource) Option { + return optionFunc(func(conf config) config { + conf.res = res + return conf + }) +} + +// WithReader associates Reader r with a MeterProvider. +// +// By default, if this option is not used, the MeterProvider will perform no +// operations; no data will be exported without a Reader. +func WithReader(r Reader) Option { + return optionFunc(func(cfg config) config { + if r == nil { + return cfg + } + cfg.readers = append(cfg.readers, r) + return cfg + }) +} + +// WithView associates views with a MeterProvider. +// +// Views are appended to existing ones in a MeterProvider if this option is +// used multiple times. +// +// By default, if this option is not used, the MeterProvider will use the +// default view. +func WithView(views ...View) Option { + return optionFunc(func(cfg config) config { + cfg.views = append(cfg.views, views...) 
+ return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go new file mode 100644 index 000000000..90a4ae16c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package metric provides an implementation of the OpenTelemetry metrics SDK. +// +// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information +// about the concept of OpenTelemetry metrics and +// https://opentelemetry.io/docs/concepts/components/ for more information +// about OpenTelemetry SDKs. +// +// The entry point for the metric package is the MeterProvider. It is the +// object that all API calls use to create Meters, instruments, and ultimately +// make metric measurements. Also, it is an object that should be used to +// control the life-cycle (start, flush, and shutdown) of the SDK. +// +// A MeterProvider needs to be configured to export the measured data, this is +// done by configuring it with a Reader implementation (using the WithReader +// MeterProviderOption). Readers take two forms: ones that push to an endpoint +// (NewPeriodicReader), and ones that an endpoint pulls from. See +// [go.opentelemetry.io/otel/exporters] for exporters that can be used as +// or with these Readers. +// +// Each Reader, when registered with the MeterProvider, can be augmented with a +// View. Views allow users that run OpenTelemetry instrumented code to modify +// the generated data of that instrumentation. +// +// The data generated by a MeterProvider needs to include information about its +// origin. A MeterProvider needs to be configured with a Resource, using the +// WithResource MeterProviderOption, to include this information. This Resource +// should be used to describe the unique runtime environment instrumented code +// is being run on. That way when multiple instances of the code are collected +// at a single endpoint their origin is decipherable. +// +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. +// +// See [go.opentelemetry.io/otel/metric] for more information about +// the metric API. +// +// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about +// the experimental features. +package metric // import "go.opentelemetry.io/otel/sdk/metric" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go new file mode 100644 index 000000000..a6c403797 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "strconv" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // The time interval (in milliseconds) between the start of two export attempts. + envInterval = "OTEL_METRIC_EXPORT_INTERVAL" + // Maximum allowed time (in milliseconds) to export data. 
+ envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT" +) + +// envDuration returns an environment variable's value as duration in milliseconds if it is exists, +// or the defaultValue if the environment variable is not defined or the value is not valid. +func envDuration(key string, defaultValue time.Duration) time.Duration { + v := os.Getenv(key) + if v == "" { + return defaultValue + } + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "environment variable", key, "value", v) + return defaultValue + } + if d <= 0 { + global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v) + return defaultValue + } + return time.Duration(d) * time.Millisecond +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go new file mode 100644 index 000000000..82619da78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "runtime" + "slices" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/x" +) + +// reservoirFunc returns the appropriately configured exemplar reservoir +// creation func based on the passed InstrumentKind and user defined +// environment variables. +// +// Note: This will only return non-nil values when the experimental exemplar +// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable +// is not set to always_off. +func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { + if !x.Exemplars.Enabled() { + return nil + } + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + var filter exemplar.Filter + + switch os.Getenv(filterEnvKey) { + case "always_on": + filter = exemplar.AlwaysOnFilter + case "always_off": + return exemplar.Drop + case "trace_based": + fallthrough + default: + filter = exemplar.SampledFilter + } + + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults + // Explicit bucket histogram aggregation with more than 1 bucket will + // use AlignedHistogramBucketExemplarReservoir. + a, ok := agg.(AggregationExplicitBucketHistogram) + if ok && len(a.Boundaries) > 0 { + cp := slices.Clone(a.Boundaries) + return func() exemplar.FilteredReservoir[N] { + bounds := cp + return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) + } + } + + var n int + if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { + // Base2 Exponential Histogram Aggregation SHOULD use a + // SimpleFixedSizeExemplarReservoir with a reservoir equal to the + // smaller of the maximum number of buckets configured on the + // aggregation or twenty (e.g. min(20, max_buckets)). + n = int(a.MaxSize) + if n > 20 { + n = 20 + } + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + // This Exemplar reservoir MAY take a configuration parameter for + // the size of the reservoir. 
If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. + n = 1 + } + } + + return func() exemplar.FilteredReservoir[N] { + return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go new file mode 100644 index 000000000..1a3cccb67 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ErrExporterShutdown is returned if Export or Shutdown are called after an +// Exporter has been Shutdown. +var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") + +// Exporter handles the delivery of metric data to external receivers. This is +// the final component in the metric push pipeline. +type Exporter interface { + // Temporality returns the Temporality to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Temporality(InstrumentKind) metricdata.Temporality + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Aggregation returns the Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Aggregation(InstrumentKind) Aggregation + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Export serializes and transmits metric data to a receiver. + // + // This is called synchronously, there is no concurrency safety + // requirement. Because of this, it is critical that all timeouts and + // cancellations of the passed context be honored. + // + // All retry logic must be contained in this function. The SDK does not + // implement any retry logic. All errors returned by this function are + // considered unrecoverable and will be reported to a configured error + // Handler. + // + // The passed ResourceMetrics may be reused when the call completes. If an + // exporter needs to hold this data after it returns, it needs to make a + // copy. + Export(context.Context, *metricdata.ResourceMetrics) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush flushes any metric data held by an exporter. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // This method needs to be concurrent safe. + ForceFlush(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown flushes all metric data held by an exporter and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. 
+ // + // After Shutdown is called, calls to Export will perform no operation and + // instead will return an error indicating the shutdown state. + // + // This method needs to be concurrent safe. + Shutdown(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go new file mode 100644 index 000000000..b52a330b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -0,0 +1,347 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +var zeroScope instrumentation.Scope + +// InstrumentKind is the identifier of a group of instruments that all +// performing the same function. +type InstrumentKind uint8 + +const ( + // instrumentKindUndefined is an undefined instrument kind, it should not + // be used by any initialized type. + instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused + // InstrumentKindCounter identifies a group of instruments that record + // increasing values synchronously with the code path they are measuring. + InstrumentKindCounter InstrumentKind = 1 + // InstrumentKindUpDownCounter identifies a group of instruments that + // record increasing and decreasing values synchronously with the code path + // they are measuring. + InstrumentKindUpDownCounter InstrumentKind = 2 + // InstrumentKindHistogram identifies a group of instruments that record a + // distribution of values synchronously with the code path they are + // measuring. + InstrumentKindHistogram InstrumentKind = 3 + // InstrumentKindObservableCounter identifies a group of instruments that + // record increasing values in an asynchronous callback. + InstrumentKindObservableCounter InstrumentKind = 4 + // InstrumentKindObservableUpDownCounter identifies a group of instruments + // that record increasing and decreasing values in an asynchronous + // callback. + InstrumentKindObservableUpDownCounter InstrumentKind = 5 + // InstrumentKindObservableGauge identifies a group of instruments that + // record current values in an asynchronous callback. + InstrumentKindObservableGauge InstrumentKind = 6 + // InstrumentKindGauge identifies a group of instruments that record + // instantaneous values synchronously with the code path they are + // measuring. + InstrumentKindGauge InstrumentKind = 7 +) + +type nonComparable [0]func() // nolint: unused // This is indeed used. + +// Instrument describes properties an instrument is created with. +type Instrument struct { + // Name is the human-readable identifier of the instrument. + Name string + // Description describes the purpose of the instrument. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of measurement recorded by the instrument. + Unit string + // Scope identifies the instrumentation that created the instrument. 
+ Scope instrumentation.Scope + + // Ensure forward compatibility if non-comparable fields need to be added. + nonComparable // nolint: unused +} + +// IsEmpty returns if all Instrument fields are their zero-value. +func (i Instrument) IsEmpty() bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +// matches returns whether all the non-zero-value fields of i match the +// corresponding fields of other. If i is empty it will match all other, and +// true will always be returned. +func (i Instrument) matches(other Instrument) bool { + return i.matchesName(other) && + i.matchesDescription(other) && + i.matchesKind(other) && + i.matchesUnit(other) && + i.matchesScope(other) +} + +// matchesName returns true if the Name of i is "" or it equals the Name of +// other, otherwise false. +func (i Instrument) matchesName(other Instrument) bool { + return i.Name == "" || i.Name == other.Name +} + +// matchesDescription returns true if the Description of i is "" or it equals +// the Description of other, otherwise false. +func (i Instrument) matchesDescription(other Instrument) bool { + return i.Description == "" || i.Description == other.Description +} + +// matchesKind returns true if the Kind of i is its zero-value or it equals the +// Kind of other, otherwise false. +func (i Instrument) matchesKind(other Instrument) bool { + return i.Kind == instrumentKindUndefined || i.Kind == other.Kind +} + +// matchesUnit returns true if the Unit of i is its zero-value or it equals the +// Unit of other, otherwise false. +func (i Instrument) matchesUnit(other Instrument) bool { + return i.Unit == "" || i.Unit == other.Unit +} + +// matchesScope returns true if the Scope of i is its zero-value or it equals +// the Scope of other, otherwise false. +func (i Instrument) matchesScope(other Instrument) bool { + return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) && + (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) && + (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL) +} + +// Stream describes the stream of data an instrument produces. +type Stream struct { + // Name is the human-readable identifier of the stream. + Name string + // Description describes the purpose of the data. + Description string + // Unit is the unit of measurement recorded. + Unit string + // Aggregation the stream uses for an instrument. + Aggregation Aggregation + // AttributeFilter is an attribute Filter applied to the attributes + // recorded for an instrument's measurement. If the filter returns false + // the attribute will not be recorded, otherwise, if it returns true, it + // will record the attribute. + // + // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to + // provide an allow-list of attribute keys here. + AttributeFilter attribute.Filter +} + +// instID are the identifying properties of a instrument. +type instID struct { + // Name is the name of the stream. + Name string + // Description is the description of the stream. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of the stream. + Unit string + // Number is the number type of the stream. + Number string +} + +// Returns a normalized copy of the instID i. +// +// Instrument names are considered case-insensitive. 
Standardize the instrument +// name to always be lowercase for the returned instID so it can be compared +// without the name casing affecting the comparison. +func (i instID) normalize() instID { + i.Name = strings.ToLower(i.Name) + return i +} + +type int64Inst struct { + measures []aggregate.Measure[int64] + + embedded.Int64Counter + embedded.Int64UpDownCounter + embedded.Int64Histogram + embedded.Int64Gauge +} + +var ( + _ metric.Int64Counter = (*int64Inst)(nil) + _ metric.Int64UpDownCounter = (*int64Inst)(nil) + _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) +) + +func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. + for _, in := range i.measures { + in(ctx, val, s) + } +} + +type float64Inst struct { + measures []aggregate.Measure[float64] + + embedded.Float64Counter + embedded.Float64UpDownCounter + embedded.Float64Histogram + embedded.Float64Gauge +} + +var ( + _ metric.Float64Counter = (*float64Inst)(nil) + _ metric.Float64UpDownCounter = (*float64Inst)(nil) + _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) +) + +func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { + for _, in := range i.measures { + in(ctx, val, s) + } +} + +// observablID is a comparable unique identifier of an observable. 
+type observablID[N int64 | float64] struct { + name string + description string + kind InstrumentKind + unit string + scope instrumentation.Scope +} + +type float64Observable struct { + metric.Float64Observable + *observable[float64] + + embedded.Float64ObservableCounter + embedded.Float64ObservableUpDownCounter + embedded.Float64ObservableGauge +} + +var ( + _ metric.Float64ObservableCounter = float64Observable{} + _ metric.Float64ObservableUpDownCounter = float64Observable{} + _ metric.Float64ObservableGauge = float64Observable{} +) + +func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable { + return float64Observable{ + observable: newObservable[float64](m, kind, name, desc, u), + } +} + +type int64Observable struct { + metric.Int64Observable + *observable[int64] + + embedded.Int64ObservableCounter + embedded.Int64ObservableUpDownCounter + embedded.Int64ObservableGauge +} + +var ( + _ metric.Int64ObservableCounter = int64Observable{} + _ metric.Int64ObservableUpDownCounter = int64Observable{} + _ metric.Int64ObservableGauge = int64Observable{} +) + +func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable { + return int64Observable{ + observable: newObservable[int64](m, kind, name, desc, u), + } +} + +type observable[N int64 | float64] struct { + metric.Observable + observablID[N] + + meter *meter + measures measures[N] + dropAggregation bool +} + +func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { + return &observable[N]{ + observablID: observablID[N]{ + name: name, + description: desc, + kind: kind, + unit: u, + scope: m.scope, + }, + meter: m, + } +} + +// observe records the val for the set of attrs. +func (o *observable[N]) observe(val N, s attribute.Set) { + o.measures.observe(val, s) +} + +func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) { + o.measures = append(o.measures, meas...) +} + +type measures[N int64 | float64] []aggregate.Measure[N] + +// observe records the val for the set of attrs. +func (m measures[N]) observe(val N, s attribute.Set) { + for _, in := range m { + in(context.Background(), val, s) + } +} + +var errEmptyAgg = errors.New("no aggregators for observable instrument") + +// registerable returns an error if the observable o should not be registered, +// and nil if it should. An errEmptyAgg error is returned if o is effectively a +// no-op because it does not have any aggregators. Also, an error is returned +// if scope defines a Meter other than the one o was created by. +func (o *observable[N]) registerable(m *meter) error { + if len(o.measures) == 0 { + return errEmptyAgg + } + if m != o.meter { + return fmt.Errorf( + "invalid registration: observable %q from Meter %q, registered with Meter %q", + o.name, + o.scope.Name, + m.scope.Name, + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go new file mode 100644 index 000000000..25ea6244e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT. + +package metric + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[instrumentKindUndefined-0] + _ = x[InstrumentKindCounter-1] + _ = x[InstrumentKindUpDownCounter-2] + _ = x[InstrumentKindHistogram-3] + _ = x[InstrumentKindObservableCounter-4] + _ = x[InstrumentKindObservableUpDownCounter-5] + _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] +} + +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" + +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} + +func (i InstrumentKind) String() string { + if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go new file mode 100644 index 000000000..b18ee719b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// now is used to return the current local time while allowing tests to +// override the default time.Now function. +var now = time.Now + +// Measure receives measurements to be aggregated. +type Measure[N int64 | float64] func(context.Context, N, attribute.Set) + +// ComputeAggregation stores the aggregate of measurements into dest and +// returns the number of aggregate data-points output. +type ComputeAggregation func(dest *metricdata.Aggregation) int + +// Builder builds an aggregate function. +type Builder[N int64 | float64] struct { + // Temporality is the temporality used for the returned aggregate function. + // + // If this is not provided a default of cumulative will be used (except for + // the last-value aggregate function where delta is the only appropriate + // temporality). + Temporality metricdata.Temporality + // Filter is the attribute filter the aggregate function will use on the + // input of measurements. + Filter attribute.Filter + // ReservoirFunc is the factory function used by aggregate functions to + // create new exemplar reservoirs for a new seen attribute set. + // + // If this is not provided a default factory function that returns an + // exemplar.Drop reservoir will be used. + ReservoirFunc func() exemplar.FilteredReservoir[N] + // AggregationLimit is the cardinality limit of measurement attributes. Any + // measurement for new attributes once the limit has been reached will be + // aggregated into a single aggregate for the "otel.metric.overflow" + // attribute. + // + // If AggregationLimit is less than or equal to zero there will not be an + // aggregation limit imposed (i.e. unlimited attribute sets). 
+ AggregationLimit int +} + +func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { + if b.ReservoirFunc != nil { + return b.ReservoirFunc + } + + return exemplar.Drop +} + +type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) + +func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { + if b.Filter != nil { + fltr := b.Filter // Copy to make it immutable after assignment. + return func(ctx context.Context, n N, a attribute.Set) { + fAttr, dropped := a.Filter(fltr) + f(ctx, n, fAttr, dropped) + } + } + return func(ctx context.Context, n N, a attribute.Set) { + f(ctx, n, a, nil) + } +} + +// LastValue returns a last-value aggregate function input and output. +func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { + lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedSum returns a sum aggregate function input and output. The +// arguments passed to the input are expected to be the precomputed sum values. +func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// Sum returns a sum aggregate function input and output. +func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// ExplicitBucketHistogram returns a histogram aggregate function input and +// output. +func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { + h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(h.measure), h.delta + default: + return b.filter(h.measure), h.cumulative + } +} + +// ExponentialBucketHistogram returns a histogram aggregate function input and +// output. +func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { + h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(h.measure), h.delta + default: + return b.filter(h.measure), h.cumulative + } +} + +// reset ensures s has capacity and sets it length. If the capacity of s too +// small, a new slice is returned with the specified capacity and length. 
+func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go new file mode 100644 index 000000000..7b7225e6e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package aggregate provides aggregate types used compute aggregations and +// cycle the state of metric measurements made by the SDK. These types and +// functionality are meant only for internal SDK use. +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go new file mode 100644 index 000000000..170ae8e58 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "sync" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var exemplarPool = sync.Pool{ + New: func() any { return new([]exemplar.Exemplar) }, +} + +func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { + dest := exemplarPool.Get().(*[]exemplar.Exemplar) + defer func() { + *dest = (*dest)[:0] + exemplarPool.Put(dest) + }() + + *dest = reset(*dest, len(*out), cap(*out)) + + f(dest) + + *out = reset(*out, len(*dest), cap(*dest)) + for i, e := range *dest { + (*out)[i].FilteredAttributes = e.FilteredAttributes + (*out)[i].Time = e.Time + (*out)[i].SpanID = e.SpanID + (*out)[i].TraceID = e.TraceID + + switch e.Value.Type() { + case exemplar.Int64ValueType: + (*out)[i].Value = N(e.Value.Int64()) + case exemplar.Float64ValueType: + (*out)[i].Value = N(e.Value.Float64()) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go new file mode 100644 index 000000000..707342408 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -0,0 +1,444 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "errors" + "math" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +const ( + expoMaxScale = 20 + expoMinScale = -10 + + smallestNonZeroNormalFloat64 = 0x1p-1022 + + // These redefine the Math constants with a type, so the compiler won't coerce + // them into an int on 32 bit platforms. + maxInt64 int64 = math.MaxInt64 + minInt64 int64 = math.MinInt64 +) + +// expoHistogramDataPoint is a single data point in an exponential histogram. 
+type expoHistogramDataPoint[N int64 | float64] struct {
+	attrs attribute.Set
+	res   exemplar.FilteredReservoir[N]
+
+	count uint64
+	min   N
+	max   N
+	sum   N
+
+	maxSize  int
+	noMinMax bool
+	noSum    bool
+
+	scale int32
+
+	posBuckets expoBuckets
+	negBuckets expoBuckets
+	zeroCount  uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+	f := math.MaxFloat64
+	max := N(f) // if N is int64, max will overflow to -9223372036854775808
+	min := N(-f)
+	if N(maxInt64) > N(f) {
+		max = N(maxInt64)
+		min = N(minInt64)
+	}
+	return &expoHistogramDataPoint[N]{
+		attrs:    attrs,
+		min:      max,
+		max:      min,
+		maxSize:  maxSize,
+		noMinMax: noMinMax,
+		noSum:    noSum,
+		scale:    maxScale,
+	}
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+	p.count++
+
+	if !p.noMinMax {
+		if v < p.min {
+			p.min = v
+		}
+		if v > p.max {
+			p.max = v
+		}
+	}
+	if !p.noSum {
+		p.sum += v
+	}
+
+	absV := math.Abs(float64(v))
+
+	if float64(absV) == 0.0 {
+		p.zeroCount++
+		return
+	}
+
+	bin := p.getBin(absV)
+
+	bucket := &p.posBuckets
+	if v < 0 {
+		bucket = &p.negBuckets
+	}
+
+	// If the new bin would make the counts larger than maxScale, we need to
+	// downscale current measurements.
+	if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+		if p.scale-scaleDelta < expoMinScale {
+			// With a scale of -10 there is only two buckets for the whole range of float64 values.
+			// This can only happen if there is a max size of 1.
+			otel.Handle(errors.New("exponential histogram scale underflow"))
+			return
+		}
+		// Downscale
+		p.scale -= scaleDelta
+		p.posBuckets.downscale(scaleDelta)
+		p.negBuckets.downscale(scaleDelta)
+
+		bin = p.getBin(absV)
+	}
+
+	bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+	frac, expInt := math.Frexp(v)
+	// 11-bit exponential.
+	exp := int32(expInt) // nolint: gosec
+	if p.scale <= 0 {
+		// Because of the choice of fraction is always 1 power of two higher than we want.
+		var correction int32 = 1
+		if frac == .5 {
+			// If v is an exact power of two the frac will be .5 and the exp
+			// will be one higher than we want.
+			correction = 2
+		}
+		return (exp - correction) >> (-p.scale)
+	}
+	return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
+
+// scaleFactors are constants used in calculating the logarithm index. They are
+// equivalent to 2^index/log(2).
+var scaleFactors = [21]float64{
+	math.Ldexp(math.Log2E, 0),
+	math.Ldexp(math.Log2E, 1),
+	math.Ldexp(math.Log2E, 2),
+	math.Ldexp(math.Log2E, 3),
+	math.Ldexp(math.Log2E, 4),
+	math.Ldexp(math.Log2E, 5),
+	math.Ldexp(math.Log2E, 6),
+	math.Ldexp(math.Log2E, 7),
+	math.Ldexp(math.Log2E, 8),
+	math.Ldexp(math.Log2E, 9),
+	math.Ldexp(math.Log2E, 10),
+	math.Ldexp(math.Log2E, 11),
+	math.Ldexp(math.Log2E, 12),
+	math.Ldexp(math.Log2E, 13),
+	math.Ldexp(math.Log2E, 14),
+	math.Ldexp(math.Log2E, 15),
+	math.Ldexp(math.Log2E, 16),
+	math.Ldexp(math.Log2E, 17),
+	math.Ldexp(math.Log2E, 18),
+	math.Ldexp(math.Log2E, 19),
+	math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
+	if length == 0 {
+		// No need to rescale if there are no buckets.
+		return 0
+	}
+
+	low := int(startBin)
+	high := int(startBin) + length - 1
+	if int(bin) < low {
+		low = int(bin)
+	} else if int(bin) > high {
+		high = int(bin)
+	}
+
+	var count int32
+	for high-low >= p.maxSize {
+		low = low >> 1
+		high = high >> 1
+		count++
+		if count > expoMaxScale-expoMinScale {
+			return count
+		}
+	}
+	return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+	startBin int32
+	counts   []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int32) { + if len(b.counts) == 0 { + b.counts = []uint64{1} + b.startBin = bin + return + } + + endBin := int(b.startBin) + len(b.counts) - 1 + + // if the new bin is inside the current range + if bin >= b.startBin && int(bin) <= endBin { + b.counts[bin-b.startBin]++ + return + } + // if the new bin is before the current start add spaces to the counts + if bin < b.startBin { + origLen := len(b.counts) + newLength := endBin - int(bin) + 1 + shift := b.startBin - bin + + if newLength > cap(b.counts) { + b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) + } + + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + b.counts = b.counts[:newLength] + for i := 1; i < int(shift); i++ { + b.counts[i] = 0 + } + b.startBin = bin + b.counts[0] = 1 + return + } + // if the new is after the end add spaces to the end + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { + b.counts = b.counts[:bin-b.startBin+1] + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { + b.counts[i] = 0 + } + b.counts[bin-b.startBin] = 1 + return + } + + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) + b.counts = append(b.counts, end...) + b.counts[bin-b.startBin] = 1 + } +} + +// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the +// correct lower resolution bucket. +func (b *expoBuckets) downscale(delta int32) { + // Example + // delta = 2 + // Original offset: -6 + // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 + // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1 + // new Offset: -2 + // new Counts: [4, 14, 30, 10] + + if len(b.counts) <= 1 || delta < 1 { + b.startBin = b.startBin >> delta + return + } + + steps := int32(1) << delta + offset := b.startBin % steps + offset = (offset + steps) % steps // to make offset positive + for i := 1; i < len(b.counts); i++ { + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] + continue + } + b.counts[idx/int(steps)] += b.counts[i] + } + + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) + b.counts = b.counts[:lastIdx+1] + b.startBin = b.startBin >> delta +} + +// newExponentialHistogram returns an Aggregator that summarizes a set of +// measurements as an exponential histogram. Each histogram is scoped by attributes +// and the aggregation cycle the measurements were made in. +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { + return &expoHistogram[N]{ + noSum: noSum, + noMinMax: noMinMax, + maxSize: int(maxSize), + maxScale: maxScale, + + newRes: r, + limit: newLimiter[*expoHistogramDataPoint[N]](limit), + values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), + + start: now(), + } +} + +// expoHistogram summarizes a set of measurements as an histogram with exponentially +// defined buckets. +type expoHistogram[N int64 | float64] struct { + noSum bool + noMinMax bool + maxSize int + maxScale int32 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*expoHistogramDataPoint[N]] + values map[attribute.Distinct]*expoHistogramDataPoint[N] + valuesMu sync.Mutex + + start time.Time +} + +func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // Ignore NaN and infinity. 
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { + return + } + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + attr := e.limit.Attributes(fltrAttr, e.values) + v, ok := e.values[attr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes() + + e.values[attr.Equivalent()] = v + } + v.record(value) + v.res.Offer(ctx, value, droppedAttr) +} + +func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.DeltaTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(e.values) + + e.start = t + h.DataPoints = hDPts + *dest = h + return n +} + +func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go new file mode 100644 index 000000000..ade0941f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "slices" + "sort" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type buckets[N int64 | float64] struct { + attrs attribute.Set + res exemplar.FilteredReservoir[N] + + counts []uint64 + count uint64 + total N + min, max N +} + +// newBuckets returns buckets with n bins. +func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { + return &buckets[N]{attrs: attrs, counts: make([]uint64, n)} +} + +func (b *buckets[N]) sum(value N) { b.total += value } + +func (b *buckets[N]) bin(idx int, value N) { + b.counts[idx]++ + b.count++ + if value < b.min { + b.min = value + } else if value > b.max { + b.max = value + } +} + +// histValues summarizes a set of measurements as an histValues with +// explicitly defined buckets. +type histValues[N int64 | float64] struct { + noSum bool + bounds []float64 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*buckets[N]] + values map[attribute.Distinct]*buckets[N] + valuesMu sync.Mutex +} + +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { + // The responsibility of keeping all buckets correctly associated with the + // passed boundaries is ultimately this type's responsibility. Make a copy + // here so we can always guarantee this. Or, in the case of failure, have + // complete control over the fix. 
+ b := slices.Clone(bounds) + slices.Sort(b) + return &histValues[N]{ + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[*buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), + } +} + +// Aggregate records the measurement value, scoped by attr, and aggregates it +// into a histogram. +func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // This search will return an index in the range [0, len(s.bounds)], where + // it will return len(s.bounds) if value is greater than the last element + // of s.bounds. This aligns with the buckets in that the length of buckets + // is len(s.bounds)+1, with the last bucket representing: + // (s.bounds[len(s.bounds)-1], +∞). + idx := sort.SearchFloat64s(s.bounds, float64(value)) + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + b, ok := s.values[attr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](attr, len(s.bounds)+1) + b.res = s.newRes() + + // Ensure min and max are recorded values (not zero), for new buckets. + b.min, b.max = value, value + s.values[attr.Equivalent()] = b + } + b.bin(idx, value) + if !s.noSum { + b.sum(value) + } + b.res.Offer(ctx, value, droppedAttr) +} + +// newHistogram returns an Aggregator that summarizes a set of measurements as +// an histogram. +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { + return &histogram[N]{ + histValues: newHistValues[N](boundaries, noSum, limit, r), + noMinMax: noMinMax, + start: now(), + } +} + +// histogram summarizes a set of measurements as an histogram with explicitly +// defined buckets. +type histogram[N int64 | float64] struct { + *histValues[N] + + noMinMax bool + start time.Time +} + +func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.DeltaTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + hDPts[i].BucketCounts = val.counts + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + // The delta collection cycle resets. + s.start = t + + h.DataPoints = hDPts + *dest = h + + return n +} + +func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. 
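// [Editorial sketch, not part of the vendored file above] How measure picks a
// bucket: sort.SearchFloat64s returns the first index whose bound is >= the
// value, so anything larger than every bound falls into the extra last
// bucket. A hypothetical standalone demo with bounds {0, 5, 10}:
//
//	bounds := []float64{0, 5, 10} // buckets: (-inf,0], (0,5], (5,10], (10,+inf)
//	for _, v := range []float64{-1, 0, 3, 5, 7, 12} {
//		fmt.Println(v, "-> bucket", sort.SearchFloat64s(bounds, v))
//	}
//	// prints buckets 0, 0, 1, 1, 2 and 3 respectively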
+ h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + + // The HistogramDataPoint field values returned need to be copies of + // the buckets value as we will keep updating them. + // + // TODO (#3047): Making copies for bounds and counts incurs a large + // memory allocation footprint. Alternatives should be explored. + hDPts[i].BucketCounts = slices.Clone(val.counts) + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go new file mode 100644 index 000000000..c35936840 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// datapoint is timestamped measurement data. +type datapoint[N int64 | float64] struct { + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] +} + +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { + return &lastValue[N]{ + newRes: r, + limit: newLimiter[datapoint[N]](limit), + values: make(map[attribute.Distinct]datapoint[N]), + start: now(), + } +} + +// lastValue summarizes a set of measurements as the last one made. +type lastValue[N int64 | float64] struct { + sync.Mutex + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[datapoint[N]] + values map[attribute.Distinct]datapoint[N] + start time.Time +} + +func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + d, ok := s.values[attr.Equivalent()] + if !ok { + d.res = s.newRes() + } + + d.attrs = attr + d.value = value + d.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = d +} + +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. 
+ s.start = t + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { + n := len(s.values) + *dest = reset(*dest, n, n) + + var i int + for _, v := range s.values { + (*dest)[i].Attributes = v.attrs + (*dest)[i].StartTime = s.start + (*dest)[i].Time = t + (*dest)[i].Value = v.value + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) + i++ + } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + *dest = gData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go new file mode 100644 index 000000000..9ea0251ed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import "go.opentelemetry.io/otel/attribute" + +// overflowSet is the attribute set used to record a measurement when adding +// another distinct attribute set to the aggregate would exceed the aggregate +// limit. +var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true)) + +// limiter limits aggregate values. +type limiter[V any] struct { + // aggLimit is the maximum number of metric streams that can be aggregated. 
+ // + // Any metric stream with attributes distinct from any set already + // aggregated once the aggLimit will be meet will instead be aggregated + // into an "overflow" metric stream. That stream will only contain the + // "otel.metric.overflow"=true attribute. + aggLimit int +} + +// newLimiter returns a new Limiter with the provided aggregation limit. +func newLimiter[V any](aggregation int) limiter[V] { + return limiter[V]{aggLimit: aggregation} +} + +// Attributes checks if adding a measurement for attrs will exceed the +// aggregation cardinality limit for the existing measurements. If it will, +// overflowSet is returned. Otherwise, if it will not exceed the limit, or the +// limit is not set (limit <= 0), attr is returned. +func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set { + if l.aggLimit > 0 { + _, exists := measurements[attrs.Equivalent()] + if !exists && len(measurements) >= l.aggLimit-1 { + return overflowSet + } + } + + return attrs +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go new file mode 100644 index 000000000..891366922 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -0,0 +1,238 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type sumValue[N int64 | float64] struct { + n N + res exemplar.FilteredReservoir[N] + attrs attribute.Set +} + +// valueMap is the storage for sums. +type valueMap[N int64 | float64] struct { + sync.Mutex + newRes func() exemplar.FilteredReservoir[N] + limit limiter[sumValue[N]] + values map[attribute.Distinct]sumValue[N] +} + +func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { + return &valueMap[N]{ + newRes: r, + limit: newLimiter[sumValue[N]](limit), + values: make(map[attribute.Distinct]sumValue[N]), + } +} + +func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + v, ok := s.values[attr.Equivalent()] + if !ok { + v.res = s.newRes() + } + + v.attrs = attr + v.n += value + v.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = v +} + +// newSum returns an aggregator that summarizes a set of measurements as their +// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle +// the measurements were made in. +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { + return &sum[N]{ + valueMap: newValueMap[N](limit, r), + monotonic: monotonic, + start: now(), + } +} + +// sum summarizes a set of measurements made as their arithmetic sum. +type sum[N int64 | float64] struct { + *valueMap[N] + + monotonic bool + start time.Time +} + +func (s *sum[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. 
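// [Editorial sketch, not part of the vendored file above] The limiter above
// caps a stream map at aggLimit entries: aggLimit-1 distinct attribute sets
// plus one shared overflow set. A hypothetical version of the same check,
// keyed by plain strings for brevity:
func limitKeySketch(key string, seen map[string]struct{}, limit int) string {
	if limit > 0 {
		if _, ok := seen[key]; !ok && len(seen) >= limit-1 {
			return "otel.metric.overflow=true"
		}
	}
	return key
}

// With limit 3, the first two distinct keys pass through and every later new
// key is folded into the overflow key, so at most three streams ever exist.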
+ sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.DeltaTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + i++ + } + // Do not report stale values. + clear(s.values) + // The delta collection cycle resets. + s.start = t + + sData.DataPoints = dPts + *dest = sData + + return n +} + +func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. + sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, value := range s.values { + dPts[i].Attributes = value.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = value.n + collectExemplars(&dPts[i].Exemplars, value.res.Collect) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + i++ + } + + sData.DataPoints = dPts + *dest = sData + + return n +} + +// newPrecomputedSum returns an aggregator that summarizes a set of +// observatrions as their arithmetic sum. Each sum is scoped by attributes and +// the aggregation cycle the measurements were made in. +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { + return &precomputedSum[N]{ + valueMap: newValueMap[N](limit, r), + monotonic: monotonic, + start: now(), + } +} + +// precomputedSum summarizes a set of observatrions as their arithmetic sum. +type precomputedSum[N int64 | float64] struct { + *valueMap[N] + + monotonic bool + start time.Time + + reported map[attribute.Distinct]N +} + +func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { + t := now() + newReported := make(map[attribute.Distinct]N) + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. + sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.DeltaTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for key, value := range s.values { + delta := value.n - s.reported[key] + + dPts[i].Attributes = value.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = delta + collectExemplars(&dPts[i].Exemplars, value.res.Collect) + + newReported[key] = value.n + i++ + } + // Unused attribute sets do not report. + clear(s.values) + s.reported = newReported + // The delta collection cycle resets. + s.start = t + + sData.DataPoints = dPts + *dest = sData + + return n +} + +func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. 
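// [Editorial sketch, not part of the vendored file above] precomputedSum.delta
// above turns cumulative observations into per-cycle deltas by remembering the
// value last reported for each attribute set. A simplified, hypothetical
// illustration (the real code also drops stale keys by rebuilding the map):
func deltasSketch(observed, reported map[string]int64) map[string]int64 {
	out := make(map[string]int64, len(observed))
	for k, v := range observed {
		out[k] = v - reported[k] // missing keys read as 0, so the first delta is v
		reported[k] = v
	}
	return out
}

// Observing 10 and then 25 for the same attribute set reports deltas 10 and 15.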
+ sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + + sData.DataPoints = dPts + *dest = sData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 000000000..5394f48e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 000000000..5a0f39ae1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]Exemplar) { + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 000000000..fcaa6a469 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+ TraceID []byte `json:",omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go new file mode 100644 index 000000000..152a069a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool + +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() +} + +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 000000000..9fedfa4be --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. +type filteredReservoir[N int64 | float64] struct { + filter Filter + reservoir Reservoir +} + +// NewFilteredReservoir creates a [FilteredReservoir] which only offers values +// that are allowed by the filter. +func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { + return &filteredReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurment. 
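// [Editorial sketch, not part of the vendored file above] How the pieces above
// are assumed to fit together: a FilteredReservoir consults its Filter before
// touching the wrapped Reservoir, so with SampledFilter only measurements made
// inside a sampled span can become exemplars. The package is internal, so this
// is illustrative wiring rather than something user code imports directly:
//
//	res := NewFilteredReservoir[int64](SampledFilter, FixedSize(4))
//	res.Offer(ctx, 42, nil) // stored only if the span in ctx is sampled
//	var out []Exemplar
//	res.Collect(&out)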
+ f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) + } +} + +func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go new file mode 100644 index 000000000..a6ff86d02 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "slices" + "sort" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Histogram returns a [Reservoir] that samples the last measurement that falls +// within a histogram bucket. The histogram bucket upper-boundaries are define +// by bounds. +// +// The passed bounds will be sorted by this function. +func Histogram(bounds []float64) Reservoir { + slices.Sort(bounds) + return &histRes{ + bounds: bounds, + storage: newStorage(len(bounds) + 1), + } +} + +type histRes struct { + *storage + + // bounds are bucket bounds in ascending order. + bounds []float64 +} + +func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go new file mode 100644 index 000000000..199a2608f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -0,0 +1,191 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "math" + "math/rand" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +var ( + // rng is used to make sampling decisions. + // + // Do not use crypto/rand. There is no reason for the decrease in performance + // given this is not a security sensitive decision. + rng = rand.New(rand.NewSource(time.Now().UnixNano())) + // Ensure concurrent safe accecess to rng and its underlying source. + rngMu sync.Mutex +) + +// random returns, as a float64, a uniform pseudo-random number in the open +// interval (0.0,1.0). +func random() float64 { + // TODO: This does not return a uniform number. rng.Float64 returns a + // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it + // returns multiples of 2^-53, and not all floating point numbers between 0 + // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand + // are always going to be 0). + // + // An alternative algorithm should be considered that will actually return + // a uniform number in the interval (0,1). For example, since the default + // rand source provides a uniform distribution for Int63, this can be + // converted following the prototypical code of Mersenne Twister 64 (Takuji + // Nishimura and Makoto Matsumoto: + // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) + // + // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) + // + // There are likely many other methods to explore here as well. 
+ + rngMu.Lock() + defer rngMu.Unlock() + + f := rng.Float64() + for f == 0 { + f = rng.Float64() + } + return f +} + +// FixedSize returns a [Reservoir] that samples at most k exemplars. If there +// are k or less measurements made, the Reservoir will sample each one. If +// there are more than k, the Reservoir will then randomly sample all +// additional measurement with a decreasing probability. +func FixedSize(k int) Reservoir { + r := &randRes{storage: newStorage(k)} + r.reset() + return r +} + +type randRes struct { + *storage + + // count is the number of measurement seen. + count int64 + // next is the next count that will store a measurement at a random index + // once the reservoir has been filled. + next int64 + // w is the largest random number in a distribution that is used to compute + // the next next. + w float64 +} + +func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { + // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December + // 1994). "Reservoir-Sampling Algorithms of Time Complexity + // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): + // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). + // + // A high-level overview of "Algorithm L": + // 0) Pre-calculate the random count greater than the storage size when + // an exemplar will be replaced. + // 1) Accept all measurements offered until the configured storage size is + // reached. + // 2) Loop: + // a) When the pre-calculate count is reached, replace a random + // existing exemplar with the offered measurement. + // b) Calculate the next random count greater than the existing one + // which will replace another exemplars + // + // The way a "replacement" count is computed is by looking at `n` number of + // independent random numbers each corresponding to an offered measurement. + // Of these numbers the smallest `k` (the same size as the storage + // capacity) of them are kept as a subset. The maximum value in this + // subset, called `w` is used to weight another random number generation + // for the next count that will be considered. + // + // By weighting the next count computation like described, it is able to + // perform a uniformly-weighted sampling algorithm based on the number of + // samples the reservoir has seen so far. The sampling will "slow down" as + // more and more samples are offered so as to reduce a bias towards those + // offered just prior to the end of the collection. + // + // This algorithm is preferred because of its balance of simplicity and + // performance. It will compute three random numbers (the bulk of + // computation time) for each item that becomes part of the reservoir, but + // it does not spend any time on items that do not. In particular it has an + // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of + // measurements offered and k is the reservoir size. + // + // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of + // this and other reservoir sampling algorithms. See + // https://github.com/MrAlias/reservoir-sampling for a performance + // comparison of reservoir sampling algorithms. + + if int(r.count) < cap(r.store) { + r.store[r.count] = newMeasurement(ctx, t, n, a) + } else { + if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(rng.Int63n(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() + } + } + r.count++ +} + +// reset resets r to the initial state. 
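// [Editorial sketch, not part of the vendored file above] A compact,
// standalone rendering of "Algorithm L" as described in the Offer comment
// above, written against a plain slice instead of the reservoir storage.
// Names are hypothetical; it reuses the math and math/rand imports of this
// file and assumes k > 0.
func algorithmLSketch(stream []float64, k int) []float64 {
	u := func() float64 { // uniform in (0,1); re-roll the rare 0
		for {
			if f := rand.Float64(); f != 0 {
				return f
			}
		}
	}
	if len(stream) <= k {
		return append([]float64(nil), stream...)
	}
	res := append([]float64(nil), stream[:k]...) // accept the first k outright
	w := math.Exp(math.Log(u()) / float64(k))
	i := k - 1 // index of the last measurement considered
	for {
		// Skip ahead; skips grow as w shrinks, so replacements become rarer
		// the more measurements have been seen.
		i += int(math.Log(u())/math.Log(1-w)) + 1
		if i >= len(stream) {
			return res
		}
		res[rand.Intn(k)] = stream[i] // replace a random existing sample
		w *= math.Exp(math.Log(u()) / float64(k))
	}
}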
+func (r *randRes) reset() { + // This resets the number of exemplars known. + r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *randRes) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. The replacement random number will also be within (0,w), + // therefore the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and take r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next next being selected against + // a distribution with a higher mean (i.e. the expected value will increase + // and replacements become less likely) + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 +} + +func (r *randRes) Collect(dest *[]Exemplar) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 000000000..80fa59554 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. 
+ // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 000000000..10b2976f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// Exemplar returns m as an [Exemplar]. 
+func (m measurement) Exemplar(dest *Exemplar) { + dest.FilteredAttributes = m.FilteredAttributes + dest.Time = m.Time + dest.Value = m.Value + + if m.SpanContext.HasTraceID() { + traceID := m.SpanContext.TraceID() + dest.TraceID = traceID[:] + } else { + dest.TraceID = dest.TraceID[:0] + } + + if m.SpanContext.HasSpanID() { + spanID := m.SpanContext.SpanID() + dest.SpanID = spanID[:] + } else { + dest.SpanID = dest.SpanID[:0] + } +} + +func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go new file mode 100644 index 000000000..1957d6b1e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. + UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + return Value{t: Int64ValueType, val: uint64(v)} + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. +func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go new file mode 100644 index 000000000..19ec6806f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +// ReuseSlice returns a zeroed view of slice if its capacity is greater than or +// equal to n. Otherwise, it returns a new []T with capacity equal to n. 
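// [Editorial sketch, not part of the vendored file above] Value above packs
// either an int64 or a float64 into one uint64 and relies on the type tag to
// decode it, which is why Int64() and Float64() guard on the tag. A
// hypothetical round trip showing why the tag matters:
//
//	bits := math.Float64bits(3.5)  // float64 -> IEEE-754 bit pattern
//	_ = math.Float64frombits(bits) // back to 3.5
//	n := uint64(int64(-7))         // two's-complement bits of -7
//	_ = int64(n)                   // back to -7; decoding n as float64
//	                               // bits instead would yield a NaN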
+func ReuseSlice[T any](slice []T, n int) []T { + if cap(slice) >= n { + return slice[:n] + } + return make([]T, n) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md new file mode 100644 index 000000000..aba69d654 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -0,0 +1,112 @@ +# Experimental Features + +The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Cardinality Limit](#cardinality-limit) +- [Exemplars](#exemplars) + +### Cardinality Limit + +The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument. + +This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value. +The value must be an integer value. +All other values are ignored. + +If the value set is less than or equal to `0`, no limit will be applied. + +#### Examples + +Set the cardinality limit to 2000. + +```console +export OTEL_GO_X_CARDINALITY_LIMIT=2000 +``` + +Set an infinite cardinality limit (functionally equivalent to disabling the feature). + +```console +export OTEL_GO_X_CARDINALITY_LIMIT=-1 +``` + +Disable the cardinality limit. + +```console +unset OTEL_GO_X_CARDINALITY_LIMIT +``` + +### Exemplars + +A sample of measurements made may be exported directly as a set of exemplars. + +This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable. +The value of must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + +Exemplar filters are a supported. +The exemplar filter applies to all measurements made. +They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir. + +To change the exemplar filter from the default `"trace_based"` filter set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable. +The value must be the case-sensitive string defined by the [OpenTelemetry specification]. + +- `"always_on"`: allows all measurements +- `"always_off"`: denies all measurements +- `"trace_based"`: allows only sampled measurements + +All values other than these will result in the default, `"trace_based"`, exemplar filter being used. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar + +#### Examples + +Enable exemplars to be exported. + +```console +export OTEL_GO_X_EXEMPLAR=true +``` + +Disable exemplars from being exported. + +```console +unset OTEL_GO_X_EXEMPLAR +``` + +Set the exemplar filter to allow all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_on +``` + +Set the exemplar filter to deny all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_off +``` + +Set the exemplar filter to only allow sampled measurements. 
+ +```console +export OTEL_METRICS_EXEMPLAR_FILTER=trace_based +``` + +Revert to the default exemplar filter (`"trace_based"`) + +```console +unset OTEL_METRICS_EXEMPLAR_FILTER +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go new file mode 100644 index 000000000..8cd2f3741 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel metric SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" + +import ( + "os" + "strconv" + "strings" +) + +var ( + // Exemplars is an experimental feature flag that defines if exemplars + // should be recorded for metric data-points. + // + // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable + // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" + // will also enable this). + Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false + }) + + // CardinalityLimit is an experimental feature flag that defines if + // cardinality limits should be applied to the recorded metric data-points. + // + // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment + // variable to the integer limit value you want to use. + // + // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 + // will disable the cardinality limits. + CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true + }) +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. 
+func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go new file mode 100644 index 000000000..e0fd86ca7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ManualReader is a simple Reader that allows an application to +// read metrics on demand. +type ManualReader struct { + sdkProducer atomic.Value + shutdownOnce sync.Once + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector +} + +// Compile time check the manualReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&ManualReader{}: {}} + +// NewManualReader returns a Reader which is directly called to collect metrics. +func NewManualReader(opts ...ManualReaderOption) *ManualReader { + cfg := newManualReaderConfig(opts) + r := &ManualReader{ + temporalitySelector: cfg.temporalitySelector, + aggregationSelector: cfg.aggregationSelector, + } + r.externalProducers.Store(cfg.producers) + return r +} + +// register stores the sdkProducer which enables the caller +// to read metrics from the SDK on demand. +func (mr *ManualReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register manual reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality { + return mr.temporalitySelector(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. + return mr.aggregationSelector(kind) +} + +// Shutdown closes any connections and frees any resources used by the reader. +// +// This method is safe to call concurrently. +func (mr *ManualReader) Shutdown(context.Context) error { + err := ErrReaderShutdown + mr.shutdownOnce.Do(func() { + // Any future call to Collect will now return ErrReaderShutdown. + mr.sdkProducer.Store(produceHolder{ + produce: shutdownProducer{}.produce, + }) + mr.mu.Lock() + defer mr.mu.Unlock() + mr.isShutdown = true + // release references to Producer(s) + mr.externalProducers.Store([]Producer{}) + err = nil + }) + return err +} + +// Collect gathers all metric data related to the Reader from +// the SDK and other Producers and stores the result in rm. 
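// [Editorial sketch, not part of the vendored file above] Typical use of the
// experimental feature flags defined in the x package above: read the parsed
// value only when the user set the corresponding OTEL_GO_X_* environment
// variable, otherwise fall back to a default. The caller shown here is
// hypothetical.
//
//	func cardinalityLimitOrDefault(def int) int {
//		if limit, ok := x.CardinalityLimit.Lookup(); ok {
//			return limit
//		}
//		return def
//	}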
+// +// Collect will return an error if called after shutdown. +// Collect will return an error if rm is a nil ResourceMetrics. +// Collect will return an error if the context's Done channel is closed. +// +// This method is safe to call concurrently. +func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if rm == nil { + return errors.New("manual reader: *metricdata.ResourceMetrics is nil") + } + p := mr.sdkProducer.Load() + if p == nil { + return ErrReaderNotRegistered + } + + ph, ok := p.(produceHolder) + if !ok { + // The atomic.Value is entirely in the periodicReader's control so + // this should never happen. In the unforeseen case that this does + // happen, return an error instead of panicking so a users code does + // not halt in the processes. + err := fmt.Errorf("manual reader: invalid producer: %T", p) + return err + } + + err := ph.produce(ctx, rm) + if err != nil { + return err + } + var errs []error + for _, producer := range mr.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) + } + rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) + } + + global.Debug("ManualReader collection", "Data", rm) + + return unifyErrors(errs) +} + +// MarshalLog returns logging data about the ManualReader. +func (r *ManualReader) MarshalLog() interface{} { + r.mu.Lock() + down := r.isShutdown + r.mu.Unlock() + return struct { + Type string + Registered bool + Shutdown bool + }{ + Type: "ManualReader", + Registered: r.sdkProducer.Load() != nil, + Shutdown: down, + } +} + +// manualReaderConfig contains configuration options for a ManualReader. +type manualReaderConfig struct { + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + producers []Producer +} + +// newManualReaderConfig returns a manualReaderConfig configured with options. +func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig { + cfg := manualReaderConfig{ + temporalitySelector: DefaultTemporalitySelector, + aggregationSelector: DefaultAggregationSelector, + } + for _, opt := range opts { + cfg = opt.applyManual(cfg) + } + return cfg +} + +// ManualReaderOption applies a configuration option value to a ManualReader. +type ManualReaderOption interface { + applyManual(manualReaderConfig) manualReaderConfig +} + +// WithTemporalitySelector sets the TemporalitySelector a reader will use to +// determine the Temporality of an instrument based on its kind. If this +// option is not used, the reader will use the DefaultTemporalitySelector. +func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption { + return temporalitySelectorOption{selector: selector} +} + +type temporalitySelectorOption struct { + selector func(instrument InstrumentKind) metricdata.Temporality +} + +// applyManual returns a manualReaderConfig with option applied. +func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig { + mrc.temporalitySelector = t.selector + return mrc +} + +// WithAggregationSelector sets the AggregationSelector a reader will use to +// determine the aggregation to use for an instrument based on its kind. If +// this option is not used, the reader will use the DefaultAggregationSelector +// or the aggregation explicitly passed for a view matching an instrument. 
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption { + return aggregationSelectorOption{selector: selector} +} + +type aggregationSelectorOption struct { + selector AggregationSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.aggregationSelector = t.selector + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go new file mode 100644 index 000000000..2309e5b2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -0,0 +1,729 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +// ErrInstrumentName indicates the created instrument has an invalid name. +// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter. +var ErrInstrumentName = errors.New("invalid instrument name") + +// meter handles the creation and coordination of all metric instruments. A +// meter represents a single instrumentation scope; all metric telemetry +// produced by an instrumentation scope will use metric instruments from a +// single meter. +type meter struct { + embedded.Meter + + scope instrumentation.Scope + pipes pipelines + + int64Insts *cacheWithErr[instID, *int64Inst] + float64Insts *cacheWithErr[instID, *float64Inst] + int64ObservableInsts *cacheWithErr[instID, int64Observable] + float64ObservableInsts *cacheWithErr[instID, float64Observable] + + int64Resolver resolver[int64] + float64Resolver resolver[float64] +} + +func newMeter(s instrumentation.Scope, p pipelines) *meter { + // viewCache ensures instrument conflicts, including number conflicts, this + // meter is asked to create are logged to the user. + var viewCache cache[string, instID] + + var int64Insts cacheWithErr[instID, *int64Inst] + var float64Insts cacheWithErr[instID, *float64Inst] + var int64ObservableInsts cacheWithErr[instID, int64Observable] + var float64ObservableInsts cacheWithErr[instID, float64Observable] + + return &meter{ + scope: s, + pipes: p, + int64Insts: &int64Insts, + float64Insts: &float64Insts, + int64ObservableInsts: &int64ObservableInsts, + float64ObservableInsts: &float64ObservableInsts, + int64Resolver: newResolver[int64](p, &viewCache), + float64Resolver: newResolver[float64](p, &viewCache), + } +} + +// Compile-time check meter implements metric.Meter. +var _ metric.Meter = (*meter)(nil) + +// Int64Counter returns a new instrument identified by name and configured with +// options. The instrument is used to synchronously record increasing int64 +// measurements during a computational operation. +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + cfg := metric.NewInt64CounterConfig(options...) 
+ const kind = InstrumentKindCounter + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64UpDownCounter returns a new instrument identified by name and +// configured with options. The instrument is used to synchronously record +// int64 measurements during a computational operation. +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + cfg := metric.NewInt64UpDownCounterConfig(options...) + const kind = InstrumentKindUpDownCounter + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64Histogram returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + cfg := metric.NewInt64HistogramConfig(options...) + p := int64InstProvider{m} + i, err := p.lookupHistogram(name, cfg) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + cfg := metric.NewInt64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// int64ObservableInstrument returns a new observable identified by the Instrument. +// It registers callbacks for each reader's pipeline. +func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) { + key := instID{ + Name: id.Name, + Description: id.Description, + Unit: id.Unit, + Kind: id.Kind, + } + if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 { + warnRepeatedObservableCallbacks(id) + } + return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) { + inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit) + for _, insert := range m.int64Resolver.inserters { + // Connect the measure functions for instruments in this pipeline with the + // callbacks for this pipeline. + in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind)) + if err != nil { + return inst, err + } + // Drop aggregation + if len(in) == 0 { + inst.dropAggregation = true + continue + } + inst.appendMeasures(in) + for _, cback := range callbacks { + inst := int64Observer{measures: in} + fn := cback + insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) }) + } + } + return inst, validateInstrumentName(id.Name) + }) +} + +// Int64ObservableCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// increasing int64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. 
+// +// If Int64ObservableCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableCounter, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Int64ObservableUpDownCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// int64 measurements once per a measurement collection cycle. Only the +// measurements recorded during the collection cycle are exported. +// +// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableUpDownCounter, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Int64ObservableGauge returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// instantaneous int64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Int64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableGauge, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64Counter returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record increasing +// float64 measurements during a computational operation. +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + cfg := metric.NewFloat64CounterConfig(options...) + const kind = InstrumentKindCounter + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64UpDownCounter returns a new instrument identified by name and +// configured with options. 
The instrument is used to synchronously record +// float64 measurements during a computational operation. +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + cfg := metric.NewFloat64UpDownCounterConfig(options...) + const kind = InstrumentKindUpDownCounter + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64Histogram returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + cfg := metric.NewFloat64HistogramConfig(options...) + p := float64InstProvider{m} + i, err := p.lookupHistogram(name, cfg) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + cfg := metric.NewFloat64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// float64ObservableInstrument returns a new observable identified by the Instrument. +// It registers callbacks for each reader's pipeline. +func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) { + key := instID{ + Name: id.Name, + Description: id.Description, + Unit: id.Unit, + Kind: id.Kind, + } + if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 { + warnRepeatedObservableCallbacks(id) + } + return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) { + inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit) + for _, insert := range m.float64Resolver.inserters { + // Connect the measure functions for instruments in this pipeline with the + // callbacks for this pipeline. + in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind)) + if err != nil { + return inst, err + } + // Drop aggregation + if len(in) == 0 { + inst.dropAggregation = true + continue + } + inst.appendMeasures(in) + for _, cback := range callbacks { + inst := float64Observer{measures: in} + fn := cback + insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) }) + } + } + return inst, validateInstrumentName(id.Name) + }) +} + +// Float64ObservableCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// increasing float64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. 
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableUpDownCounter returns a new instrument identified by name +// and configured with options. The instrument is used to asynchronously record +// float64 measurements once per a measurement collection cycle. Only the +// measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableUpDownCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableGauge returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// instantaneous float64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableGauge, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +func validateInstrumentName(name string) error { + if len(name) == 0 { + return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) + } + if len(name) > 255 { + return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name) + } + if !isAlpha([]rune(name)[0]) { + return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name) + } + if len(name) == 1 { + return nil + } + for _, c := range name[1:] { + if !isAlphanumeric(c) && c != '_' && c != '.' 
&& c != '-' && c != '/' { + return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name) + } + } + return nil +} + +func isAlpha(c rune) bool { + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} + +func isAlphanumeric(c rune) bool { + return isAlpha(c) || ('0' <= c && c <= '9') +} + +func warnRepeatedObservableCallbacks(id Instrument) { + inst := fmt.Sprintf( + "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", + id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, + ) + global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.", + "instrument", inst, + ) +} + +// RegisterCallback registers f to be called each collection cycle so it will +// make observations for insts during those cycles. +// +// The only instruments f can make observations for are insts. All other +// observations will be dropped and an error will be logged. +// +// Only instruments from this meter can be registered with f, an error is +// returned if other instrument are provided. +// +// Only observations made in the callback will be exported. Unlike synchronous +// instruments, asynchronous callbacks can "forget" attribute sets that are no +// longer relevant by omitting the observation during the callback. +// +// The returned Registration can be used to unregister f. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if len(insts) == 0 { + // Don't allocate a observer if not needed. + return noopRegister{}, nil + } + + reg := newObserver() + var errs multierror + for _, inst := range insts { + // Unwrap any global. + if u, ok := inst.(interface { + Unwrap() metric.Observable + }); ok { + inst = u.Unwrap() + } + + switch o := inst.(type) { + case int64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerInt64(o.observablID) + case float64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerFloat64(o.observablID) + default: + // Instrument external to the SDK. + return nil, fmt.Errorf("invalid observable: from different implementation") + } + } + + err := errs.errorOrNil() + if reg.len() == 0 { + // All insts use drop aggregation or are invalid. + return noopRegister{}, err + } + + // Some or all instruments were valid. 
+ cback := func(ctx context.Context) error { return f(ctx, reg) } + return m.pipes.registerMultiCallback(cback), err +} + +type observer struct { + embedded.Observer + + float64 map[observablID[float64]]struct{} + int64 map[observablID[int64]]struct{} +} + +func newObserver() observer { + return observer{ + float64: make(map[observablID[float64]]struct{}), + int64: make(map[observablID[int64]]struct{}), + } +} + +func (r observer) len() int { + return len(r.float64) + len(r.int64) +} + +func (r observer) registerFloat64(id observablID[float64]) { + r.float64[id] = struct{}{} +} + +func (r observer) registerInt64(id observablID[int64]) { + r.int64[id] = struct{}{} +} + +var ( + errUnknownObserver = errors.New("unknown observable instrument") + errUnregObserver = errors.New("observable instrument not registered for callback") +) + +func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) { + var oImpl float64Observable + switch conv := o.(type) { + case float64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(float64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.float64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", float64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { + var oImpl int64Observable + switch conv := o.(type) { + case int64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(int64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.int64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", int64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +type noopRegister struct{ embedded.Registration } + +func (noopRegister) Unregister() error { + return nil +} + +// int64InstProvider provides int64 OpenTelemetry instruments. +type int64InstProvider struct{ *meter } + +func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.int64Resolver.Aggregators(inst) +} + +func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. 
+ boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*int64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &int64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. +func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*int64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &int64Inst{measures: aggs}, err + }) +} + +// float64InstProvider provides float64 OpenTelemetry instruments. +type float64InstProvider struct{ *meter } + +func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.float64Resolver.Aggregators(inst) +} + +func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. + boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) { + return p.meter.float64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*float64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &float64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. 
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) { + return p.meter.float64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*float64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &float64Inst{measures: aggs}, err + }) +} + +type int64Observer struct { + embedded.Int64Observer + measures[int64] +} + +func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) { + c := metric.NewObserveConfig(opts) + o.observe(val, c.Attributes()) +} + +type float64Observer struct { + embedded.Float64Observer + measures[float64] +} + +func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) { + c := metric.NewObserveConfig(opts) + o.observe(val, c.Attributes()) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md new file mode 100644 index 000000000..d1390df1b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md @@ -0,0 +1,3 @@ +# SDK Metric data + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go new file mode 100644 index 000000000..d32cfc67d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go @@ -0,0 +1,296 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +import ( + "encoding/json" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" +) + +// ResourceMetrics is a collection of ScopeMetrics and the associated Resource +// that created them. +type ResourceMetrics struct { + // Resource represents the entity that collected the metrics. + Resource *resource.Resource + // ScopeMetrics are the collection of metrics with unique Scopes. + ScopeMetrics []ScopeMetrics +} + +// ScopeMetrics is a collection of Metrics Produces by a Meter. +type ScopeMetrics struct { + // Scope is the Scope that the Meter was created with. + Scope instrumentation.Scope + // Metrics are a list of aggregations created by the Meter. + Metrics []Metrics +} + +// Metrics is a collection of one or more aggregated timeseries from an Instrument. +type Metrics struct { + // Name is the name of the Instrument that created this data. + Name string + // Description is the description of the Instrument, which can be used in documentation. + Description string + // Unit is the unit in which the Instrument reports. + Unit string + // Data is the aggregated data from an Instrument. + Data Aggregation +} + +// Aggregation is the store of data reported by an Instrument. +// It will be one of: Gauge, Sum, Histogram. +type Aggregation interface { + privateAggregation() +} + +// Gauge represents a measurement of the current value of an instrument. +type Gauge[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []DataPoint[N] +} + +func (Gauge[N]) privateAggregation() {} + +// Sum represents the sum of all measurements of values from an instrument. 
+type Sum[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []DataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality + // IsMonotonic represents if this aggregation only increases or decreases. + IsMonotonic bool +} + +func (Sum[N]) privateAggregation() {} + +// DataPoint is a single data point in a timeseries. +type DataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. (optional) + StartTime time.Time `json:",omitempty"` + // Time is the time when the timeseries was recorded. (optional) + Time time.Time `json:",omitempty"` + // Value is the value of this data point. + Value N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// Histogram represents the histogram of all measurements of values from an instrument. +type Histogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []HistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (Histogram[N]) privateAggregation() {} + +// HistogramDataPoint is a single histogram data point in a timeseries. +type HistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this histogram has been calculated with. + Count uint64 + // Bounds are the upper bounds of the buckets of the histogram. Because the + // last boundary is +infinity this one is implied. + Bounds []float64 + // BucketCounts is the count of each of the buckets. + BucketCounts []uint64 + + // Min is the minimum value recorded. (optional) + Min Extrema[N] + // Max is the maximum value recorded. (optional) + Max Extrema[N] + // Sum is the sum of the values recorded. + Sum N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// ExponentialHistogram represents the histogram of all measurements of values from an instrument. +type ExponentialHistogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []ExponentialHistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (ExponentialHistogram[N]) privateAggregation() {} + +// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries. +type ExponentialHistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. 
+ Time time.Time + + // Count is the number of updates this histogram has been calculated with. + Count uint64 + // Min is the minimum value recorded. (optional) + Min Extrema[N] + // Max is the maximum value recorded. (optional) + Max Extrema[N] + // Sum is the sum of the values recorded. + Sum N + + // Scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = 2 ^ (2 ^ -Scale) + Scale int32 + // ZeroCount is the number of values whose absolute value + // is less than or equal to [ZeroThreshold]. + // When ZeroThreshold is 0, this is the number of values that + // cannot be expressed using the standard exponential formula + // as well as values that have been rounded to zero. + // ZeroCount represents the special zero count bucket. + ZeroCount uint64 + + // PositiveBucket is range of positive value bucket counts. + PositiveBucket ExponentialBucket + // NegativeBucket is range of negative value bucket counts. + NegativeBucket ExponentialBucket + + // ZeroThreshold is the width of the zero region. Where the zero region is + // defined as the closed interval [-ZeroThreshold, ZeroThreshold]. + ZeroThreshold float64 + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// ExponentialBucket are a set of bucket counts, encoded in a contiguous array +// of counts. +type ExponentialBucket struct { + // Offset is the bucket index of the first entry in the Counts slice. + Offset int32 + // Counts is an slice where Counts[i] carries the count of the bucket at + // index (Offset+i). Counts[i] is the count of values greater than + // base^(Offset+i) and less than or equal to base^(Offset+i+1). + Counts []uint64 +} + +// Extrema is the minimum or maximum value of a dataset. +type Extrema[N int64 | float64] struct { + value N + valid bool +} + +// MarshalText converts the Extrema value to text. +func (e Extrema[N]) MarshalText() ([]byte, error) { + if !e.valid { + return json.Marshal(nil) + } + return json.Marshal(e.value) +} + +// MarshalJSON converts the Extrema value to JSON number. +func (e *Extrema[N]) MarshalJSON() ([]byte, error) { + return e.MarshalText() +} + +// NewExtrema returns an Extrema set to v. +func NewExtrema[N int64 | float64](v N) Extrema[N] { + return Extrema[N]{value: v, valid: true} +} + +// Value returns the Extrema value and true if the Extrema is defined. +// Otherwise, if the Extrema is its zero-value, defined will be false. +func (e Extrema[N]) Value() (v N, defined bool) { + return e.value, e.valid +} + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar[N int64 | float64] struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value N + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+ TraceID []byte `json:",omitempty"` +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// data type. +// +// These data points cannot always be merged in a meaningful way. The Summary +// type is only used by bridges from other metrics libraries, and cannot be +// produced using OpenTelemetry instrumentation. +type Summary struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []SummaryDataPoint +} + +func (Summary) privateAggregation() {} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this summary has been calculated with. + Count uint64 + + // Sum is the sum of the values recorded. + Sum float64 + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []QuantileValue +} + +// QuantileValue is the value at a given quantile of a summary. +type QuantileValue struct { + // Quantile is the quantile of this value. + // + // Must be in the interval [0.0, 1.0]. + Quantile float64 + + // Value is the value at the given quantile of a summary. + // + // Quantile values must NOT be negative. + Value float64 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go new file mode 100644 index 000000000..187713dad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=Temporality + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +// Temporality defines the window that an aggregation was calculated over. +type Temporality uint8 + +const ( + // undefinedTemporality represents an unset Temporality. + //nolint:deadcode,unused,varcheck + undefinedTemporality Temporality = iota + + // CumulativeTemporality defines a measurement interval that continues to + // expand forward in time from a starting point. New measurements are + // added to all previous measurements since a start time. + CumulativeTemporality + + // DeltaTemporality defines a measurement interval that resets each cycle. + // Measurements from one cycle are recorded independently, measurements + // from other cycles do not affect them. + DeltaTemporality +) + +// MarshalText returns the byte encoded of t. +func (t Temporality) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go new file mode 100644 index 000000000..4da833cdc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Temporality"; DO NOT EDIT. 
+ +package metricdata + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[undefinedTemporality-0] + _ = x[CumulativeTemporality-1] + _ = x[DeltaTemporality-2] +} + +const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality" + +var _Temporality_index = [...]uint8{0, 20, 41, 57} + +func (i Temporality) String() string { + if i >= Temporality(len(_Temporality_index)-1) { + return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go new file mode 100644 index 000000000..67ee1b11a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -0,0 +1,370 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Default periodic reader timing. +const ( + defaultTimeout = time.Millisecond * 30000 + defaultInterval = time.Millisecond * 60000 +) + +// periodicReaderConfig contains configuration options for a PeriodicReader. +type periodicReaderConfig struct { + interval time.Duration + timeout time.Duration + producers []Producer +} + +// newPeriodicReaderConfig returns a periodicReaderConfig configured with +// options. +func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { + c := periodicReaderConfig{ + interval: envDuration(envInterval, defaultInterval), + timeout: envDuration(envTimeout, defaultTimeout), + } + for _, o := range options { + c = o.applyPeriodic(c) + } + return c +} + +// PeriodicReaderOption applies a configuration option value to a PeriodicReader. +type PeriodicReaderOption interface { + applyPeriodic(periodicReaderConfig) periodicReaderConfig +} + +// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig. +type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig + +// applyPeriodic returns a periodicReaderConfig with option(s) applied. +func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { + return o(conf) +} + +// WithTimeout configures the time a PeriodicReader waits for an export to +// complete before canceling it. This includes an export which occurs as part +// of Shutdown or ForceFlush if the user passed context does not have a +// deadline. If the user passed context does have a deadline, it will be used +// instead. +// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_TIMEOUT environment variable. +// +// If this option is not used or d is less than or equal to zero, 30 seconds +// is used as the default. +func WithTimeout(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.timeout = d + return conf + }) +} + +// WithInterval configures the intervening time between exports for a +// PeriodicReader. 
+// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_INTERVAL environment variable. +// +// If this option is not used or d is less than or equal to zero, 60 seconds +// is used as the default. +func WithInterval(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.interval = d + return conf + }) +} + +// NewPeriodicReader returns a Reader that collects and exports metric data to +// the exporter at a defined interval. By default, the returned Reader will +// collect and export data every 60 seconds, and will cancel any attempts that +// exceed 30 seconds, collect and export combined. The collect and export time +// are not counted towards the interval between attempts. +// +// The Collect method of the returned Reader continues to gather and return +// metric data to the user. It will not automatically send that data to the +// exporter. That is left to the user to accomplish. +func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { + conf := newPeriodicReaderConfig(options) + ctx, cancel := context.WithCancel(context.Background()) + r := &PeriodicReader{ + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + rmPool: sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + }, + } + r.externalProducers.Store(conf.producers) + + go func() { + defer func() { close(r.done) }() + r.run(ctx, conf.interval) + }() + + return r +} + +// PeriodicReader is a Reader that continuously collects and exports metric +// data at a set interval. +type PeriodicReader struct { + sdkProducer atomic.Value + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + interval time.Duration + timeout time.Duration + exporter Exporter + flushCh chan chan error + + done chan struct{} + cancel context.CancelFunc + shutdownOnce sync.Once + + rmPool sync.Pool +} + +// Compile time check the periodicReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&PeriodicReader{}: {}} + +// newTicker allows testing override. +var newTicker = time.NewTicker + +// run continuously collects and exports metric data at the specified +// interval. This will run until ctx is canceled or times out. +func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) { + ticker := newTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := r.collectAndExport(ctx) + if err != nil { + otel.Handle(err) + } + case errCh := <-r.flushCh: + errCh <- r.collectAndExport(ctx) + ticker.Reset(interval) + case <-ctx.Done(): + return + } + } +} + +// register registers p as the producer of this reader. +func (r *PeriodicReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register periodic reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality { + return r.exporter.Temporality(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. 
+ return r.exporter.Aggregation(kind) +} + +// collectAndExport gather all metric data related to the periodicReader r from +// the SDK and exports it with r's exporter. +func (r *PeriodicReader) collectAndExport(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, r.timeout) + defer cancel() + + // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. + rm := r.rmPool.Get().(*metricdata.ResourceMetrics) + err := r.Collect(ctx, rm) + if err == nil { + err = r.export(ctx, rm) + } + r.rmPool.Put(rm) + return err +} + +// Collect gathers all metric data related to the Reader from +// the SDK and other Producers and stores the result in rm. The metric +// data is not exported to the configured exporter, it is left to the caller to +// handle that if desired. +// +// Collect will return an error if called after shutdown. +// Collect will return an error if rm is a nil ResourceMetrics. +// Collect will return an error if the context's Done channel is closed. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if rm == nil { + return errors.New("periodic reader: *metricdata.ResourceMetrics is nil") + } + // TODO (#3047): When collect is updated to accept output as param, pass rm. + return r.collect(ctx, r.sdkProducer.Load(), rm) +} + +// collect unwraps p as a produceHolder and returns its produce results. +func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { + if p == nil { + return ErrReaderNotRegistered + } + + ph, ok := p.(produceHolder) + if !ok { + // The atomic.Value is entirely in the periodicReader's control so + // this should never happen. In the unforeseen case that this does + // happen, return an error instead of panicking so a users code does + // not halt in the processes. + err := fmt.Errorf("periodic reader: invalid producer: %T", p) + return err + } + + err := ph.produce(ctx, rm) + if err != nil { + return err + } + var errs []error + for _, producer := range r.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) + } + rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) + } + + global.Debug("PeriodicReader collection", "Data", rm) + + return unifyErrors(errs) +} + +// export exports metric data m using r's exporter. +func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error { + return r.exporter.Export(ctx, m) +} + +// ForceFlush flushes pending telemetry. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) ForceFlush(ctx context.Context) error { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + errCh := make(chan error, 1) + select { + case r.flushCh <- errCh: + select { + case err := <-errCh: + if err != nil { + return err + } + close(errCh) + case <-ctx.Done(): + return ctx.Err() + } + case <-r.done: + return ErrReaderShutdown + case <-ctx.Done(): + return ctx.Err() + } + return r.exporter.ForceFlush(ctx) +} + +// Shutdown flushes pending telemetry and then stops the export pipeline. +// +// This method is safe to call concurrently. 
+func (r *PeriodicReader) Shutdown(ctx context.Context) error { + err := ErrReaderShutdown + r.shutdownOnce.Do(func() { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + // Stop the run loop. + r.cancel() + <-r.done + + // Any future call to Collect will now return ErrReaderShutdown. + ph := r.sdkProducer.Swap(produceHolder{ + produce: shutdownProducer{}.produce, + }) + + if ph != nil { // Reader was registered. + // Flush pending telemetry. + m := r.rmPool.Get().(*metricdata.ResourceMetrics) + err = r.collect(ctx, ph, m) + if err == nil { + err = r.export(ctx, m) + } + r.rmPool.Put(m) + } + + sErr := r.exporter.Shutdown(ctx) + if err == nil || errors.Is(err, ErrReaderShutdown) { + err = sErr + } + + r.mu.Lock() + defer r.mu.Unlock() + r.isShutdown = true + // release references to Producer(s) + r.externalProducers.Store([]Producer{}) + }) + return err +} + +// MarshalLog returns logging data about the PeriodicReader. +func (r *PeriodicReader) MarshalLog() interface{} { + r.mu.Lock() + down := r.isShutdown + r.mu.Unlock() + return struct { + Type string + Exporter Exporter + Registered bool + Shutdown bool + Interval time.Duration + Timeout time.Duration + }{ + Type: "PeriodicReader", + Exporter: r.exporter, + Registered: r.sdkProducer.Load() != nil, + Shutdown: down, + Interval: r.interval, + Timeout: r.timeout, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go new file mode 100644 index 000000000..823bf2fe3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -0,0 +1,655 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "container/list" + "context" + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/internal" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + "go.opentelemetry.io/otel/sdk/metric/internal/x" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +var ( + errCreatingAggregators = errors.New("could not create all aggregators") + errIncompatibleAggregation = errors.New("incompatible aggregation") + errUnknownAggregation = errors.New("unrecognized aggregation") +) + +// instrumentSync is a synchronization point between a pipeline and an +// instrument's aggregate function. +type instrumentSync struct { + name string + description string + unit string + compAgg aggregate.ComputeAggregation +} + +func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { + if res == nil { + res = resource.Empty() + } + return &pipeline{ + resource: res, + reader: reader, + views: views, + // aggregations is lazy allocated when needed. + } +} + +// pipeline connects all of the instruments created by a meter provider to a Reader. +// This is the object that will be `Reader.register()` when a meter provider is created. +// +// As instruments are created the instrument should be checked if it exists in +// the views of a the Reader, and if so each aggregate function should be added +// to the pipeline. 
+type pipeline struct { + resource *resource.Resource + + reader Reader + views []View + + sync.Mutex + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List +} + +// addSync adds the instrumentSync to pipeline p with scope. This method is not +// idempotent. Duplicate calls will result in duplicate additions, it is the +// callers responsibility to ensure this is called with unique values. +func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) { + p.Lock() + defer p.Unlock() + if p.aggregations == nil { + p.aggregations = map[instrumentation.Scope][]instrumentSync{ + scope: {iSync}, + } + return + } + p.aggregations[scope] = append(p.aggregations[scope], iSync) +} + +type multiCallback func(context.Context) error + +// addMultiCallback registers a multi-instrument callback to be run when +// `produce()` is called. +func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { + p.Lock() + defer p.Unlock() + e := p.multiCallbacks.PushBack(c) + return func() { + p.Lock() + p.multiCallbacks.Remove(e) + p.Unlock() + } +} + +// produce returns aggregated metrics from a single collection. +// +// This method is safe to call concurrently. +func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { + p.Lock() + defer p.Unlock() + + var errs multierror + for _, c := range p.callbacks { + // TODO make the callbacks parallel. ( #3034 ) + if err := c(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { + // TODO make the callbacks parallel. ( #3034 ) + f := e.Value.(multiCallback) + if err := f(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + // This means the context expired before we finished running callbacks. + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + + rm.Resource = p.resource + rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations)) + + i := 0 + for scope, instruments := range p.aggregations { + rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments)) + j := 0 + for _, inst := range instruments { + data := rm.ScopeMetrics[i].Metrics[j].Data + if n := inst.compAgg(&data); n > 0 { + rm.ScopeMetrics[i].Metrics[j].Name = inst.name + rm.ScopeMetrics[i].Metrics[j].Description = inst.description + rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit + rm.ScopeMetrics[i].Metrics[j].Data = data + j++ + } + } + rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j] + if len(rm.ScopeMetrics[i].Metrics) > 0 { + rm.ScopeMetrics[i].Scope = scope + i++ + } + } + + rm.ScopeMetrics = rm.ScopeMetrics[:i] + + return errs.errorOrNil() +} + +// inserter facilitates inserting of new instruments from a single scope into a +// pipeline. +type inserter[N int64 | float64] struct { + // aggregators is a cache that holds aggregate function inputs whose + // outputs have been inserted into the underlying reader pipeline. This + // cache ensures no duplicate aggregate functions are inserted into the + // reader pipeline and if a new request during an instrument creation asks + // for the same aggregate function input the same instance is returned. 
+ aggregators *cache[instID, aggVal[N]] + + // views is a cache that holds instrument identifiers for all the + // instruments a Meter has created, it is provided from the Meter that owns + // this inserter. This cache ensures during the creation of instruments + // with the same name but different options (e.g. description, unit) a + // warning message is logged. + views *cache[string, instID] + + pipeline *pipeline +} + +func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] { + if vc == nil { + vc = &cache[string, instID]{} + } + return &inserter[N]{ + aggregators: &cache[instID, aggVal[N]]{}, + views: vc, + pipeline: p, + } +} + +// Instrument inserts the instrument inst with instUnit into a pipeline. All +// views the pipeline contains are matched against, and any matching view that +// creates a unique aggregate function will have its output inserted into the +// pipeline and its input included in the returned slice. +// +// The returned aggregate function inputs are ensured to be deduplicated and +// unique. If another view in another pipeline that is cached by this +// inserter's cache has already inserted the same aggregate function for the +// same instrument, that functions input instance is returned. +// +// If another instrument has already been inserted by this inserter, or any +// other using the same cache, and it conflicts with the instrument being +// inserted in this call, an aggregate function input matching the arguments +// will still be returned but an Info level log message will also be logged to +// the OTel global logger. +// +// If the passed instrument would result in an incompatible aggregate function, +// an error is returned and that aggregate function output is not inserted nor +// is its input returned. +// +// If an instrument is determined to use a Drop aggregation, that instrument is +// not inserted nor returned. +func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) { + var ( + matched bool + measures []aggregate.Measure[N] + ) + + errs := &multierror{wrapped: errCreatingAggregators} + seen := make(map[uint64]struct{}) + for _, v := range i.pipeline.views { + stream, match := v(inst) + if !match { + continue + } + matched = true + in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in == nil { // Drop aggregation. + continue + } + if _, ok := seen[id]; ok { + // This aggregate function has already been added. + continue + } + seen[id] = struct{}{} + measures = append(measures, in) + } + + if matched { + return measures, errs.errorOrNil() + } + + // Apply implicit default view if no explicit matched. + stream := Stream{ + Name: inst.Name, + Description: inst.Description, + Unit: inst.Unit, + } + in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in != nil { + // Ensured to have not seen given matched was false. + measures = append(measures, in) + } + return measures, errs.errorOrNil() +} + +// addCallback registers a single instrument callback to be run when +// `produce()` is called. +func (i *inserter[N]) addCallback(cback func(context.Context) error) { + i.pipeline.Lock() + defer i.pipeline.Unlock() + i.pipeline.callbacks = append(i.pipeline.callbacks, cback) +} + +var aggIDCount uint64 + +// aggVal is the cached value in an aggregators cache. 
+type aggVal[N int64 | float64] struct { + ID uint64 + Measure aggregate.Measure[N] + Err error +} + +// readerDefaultAggregation returns the default aggregation for the instrument +// kind based on the reader's aggregation preferences. This is used unless the +// aggregation is overridden with a view. +func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation { + aggregation := i.pipeline.reader.aggregation(kind) + switch aggregation.(type) { + case nil, AggregationDefault: + // If the reader returns default or nil use the default selector. + aggregation = DefaultAggregationSelector(kind) + default: + // Deep copy and validate before using. + aggregation = aggregation.copy() + if err := aggregation.err(); err != nil { + orig := aggregation + aggregation = DefaultAggregationSelector(kind) + global.Error( + err, "using default aggregation instead", + "aggregation", orig, + "replacement", aggregation, + ) + } + } + return aggregation +} + +// cachedAggregator returns the appropriate aggregate input and output +// functions for an instrument configuration. If the exact instrument has been +// created within the inst.Scope, those aggregate function instances will be +// returned. Otherwise, new computed aggregate functions will be cached and +// returned. +// +// If the instrument configuration conflicts with an instrument that has +// already been created (e.g. description, unit, data type) a warning will be +// logged at the "Info" level with the global OTel logger. Valid new aggregate +// functions for the instrument configuration will still be returned without an +// error. +// +// If the instrument defines an unknown or incompatible aggregation, an error +// is returned. +func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) { + switch stream.Aggregation.(type) { + case nil: + // The aggregation was not overridden with a view. Use the aggregation + // provided by the reader. + stream.Aggregation = readerAggregation + case AggregationDefault: + // The view explicitly requested the default aggregation. + stream.Aggregation = DefaultAggregationSelector(kind) + } + + if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { + return nil, 0, fmt.Errorf( + "creating aggregator with instrumentKind: %d, aggregation %v: %w", + kind, stream.Aggregation, err, + ) + } + + id := i.instID(kind, stream) + // If there is a conflict, the specification says the view should + // still be applied and a warning should be logged. + i.logConflict(id) + + // If there are requests for the same instrument with different name + // casing, the first-seen needs to be returned. Use a normalize ID for the + // cache lookup to ensure the correct comparison. + normID := id.normalize() + cv := i.aggregators.Lookup(normID, func() aggVal[N] { + b := aggregate.Builder[N]{ + Temporality: i.pipeline.reader.temporality(kind), + ReservoirFunc: reservoirFunc[N](stream.Aggregation), + } + b.Filter = stream.AttributeFilter + // A value less than or equal to zero will disable the aggregation + // limits for the builder (an all the created aggregates). + // CardinalityLimit.Lookup returns 0 by default if unset (or + // unrecognized input). Use that value directly. 
+ b.AggregationLimit, _ = x.CardinalityLimit.Lookup() + + in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) + if err != nil { + return aggVal[N]{0, nil, err} + } + if in == nil { // Drop aggregator. + return aggVal[N]{0, nil, nil} + } + i.pipeline.addSync(scope, instrumentSync{ + // Use the first-seen name casing for this and all subsequent + // requests of this instrument. + name: stream.Name, + description: stream.Description, + unit: stream.Unit, + compAgg: out, + }) + id := atomic.AddUint64(&aggIDCount, 1) + return aggVal[N]{id, in, err} + }) + return cv.Measure, cv.ID, cv.Err +} + +// logConflict validates if an instrument with the same case-insensitive name +// as id has already been created. If that instrument conflicts with id, a +// warning is logged. +func (i *inserter[N]) logConflict(id instID) { + // The API specification defines names as case-insensitive. If there is a + // different casing of a name it needs to be a conflict. + name := id.normalize().Name + existing := i.views.Lookup(name, func() instID { return id }) + if id == existing { + return + } + + const msg = "duplicate metric stream definitions" + args := []interface{}{ + "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), + "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), + "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), + "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit), + "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number), + } + + // The specification recommends logging a suggested view to resolve + // conflicts if possible. + // + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration + if id.Unit != existing.Unit || id.Number != existing.Number { + // There is no view resolution for these, don't make a suggestion. + global.Warn(msg, args...) + return + } + + var stream string + if id.Name != existing.Name || id.Kind != existing.Kind { + stream = `Stream{Name: "{{NEW_NAME}}"}` + } else if id.Description != existing.Description { + stream = fmt.Sprintf("Stream{Description: %q}", existing.Description) + } + + inst := fmt.Sprintf( + "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", + id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, + ) + args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream)) + + global.Warn(msg, args...) +} + +func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { + var zero N + return instID{ + Name: stream.Name, + Description: stream.Description, + Unit: stream.Unit, + Kind: kind, + Number: fmt.Sprintf("%T", zero), + } +} + +// aggregateFunc returns new aggregate functions matching agg, kind, and +// monotonic. If the agg is unknown or temporality is invalid, an error is +// returned. +func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { + switch a := agg.(type) { + case AggregationDefault: + return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) + case AggregationDrop: + // Return nil in and out to signify the drop aggregator. 
+ case AggregationLastValue: + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter: + meas, comp = b.PrecomputedSum(true) + case InstrumentKindObservableUpDownCounter: + meas, comp = b.PrecomputedSum(false) + case InstrumentKindCounter, InstrumentKindHistogram: + meas, comp = b.Sum(true) + default: + // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and + // instrumentKindUndefined or other invalid instrument kinds. + meas, comp = b.Sum(false) + } + case AggregationExplicitBucketHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum) + case AggregationBase2ExponentialHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum) + + default: + err = errUnknownAggregation + } + + return meas, comp, err +} + +// isAggregatorCompatible checks if the aggregation can be used by the instrument. +// Current compatibility: +// +// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram | +// |--------------------------|------|-----------|-----|-----------|-----------------------| +// | Counter | ✓ | | ✓ | ✓ | ✓ | +// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Histogram | ✓ | | ✓ | ✓ | ✓ | +// | Gauge | ✓ | ✓ | | ✓ | ✓ | +// | Observable Counter | ✓ | | ✓ | ✓ | ✓ | +// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. 
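The table above is what isAggregatorCompatible, whose implementation follows, enforces when an instrument is created. As a rough usage sketch, the snippet below wires one of the allowed combinations through a View, switching every Histogram instrument to a base-2 exponential bucket aggregation. NewManualReader, WithReader and WithView live in this same SDK package but outside this hunk, so treat those exact constructor and option names as assumptions rather than quotes from the diff.

package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader()

	// Histogram -> exponential histogram is a "✓" cell in the table above;
	// an incompatible pair (for example Counter -> LastValue) would be
	// rejected by isAggregatorCompatible when the instrument is created.
	expo := sdkmetric.NewView(
		sdkmetric.Instrument{Kind: sdkmetric.InstrumentKindHistogram},
		sdkmetric.Stream{Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
			MaxSize:  160,
			MaxScale: 20,
		}},
	)

	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(expo),
	)
	defer func() { _ = mp.Shutdown(ctx) }()

	hist, _ := mp.Meter("example").Float64Histogram("latency")
	hist.Record(ctx, 0.25)

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm) // rm now holds an ExponentialHistogram, not the default buckets.
}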
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { + switch agg.(type) { + case AggregationDefault: + return nil + case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram: + switch kind { + case InstrumentKindCounter, + InstrumentKindUpDownCounter, + InstrumentKindHistogram, + InstrumentKindGauge, + InstrumentKindObservableCounter, + InstrumentKindObservableUpDownCounter, + InstrumentKindObservableGauge: + return nil + default: + return errIncompatibleAggregation + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter: + return nil + default: + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + } + case AggregationLastValue: + switch kind { + case InstrumentKindObservableGauge, InstrumentKindGauge: + return nil + } + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + case AggregationDrop: + return nil + default: + // This is used passed checking for default, it should be an error at this point. + return fmt.Errorf("%w: %v", errUnknownAggregation, agg) + } +} + +// pipelines is the group of pipelines connecting Readers with instrument +// measurement. +type pipelines []*pipeline + +func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { + pipes := make([]*pipeline, 0, len(readers)) + for _, r := range readers { + p := newPipeline(res, r, views) + r.register(p) + pipes = append(pipes, p) + } + return pipes +} + +func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { + unregs := make([]func(), len(p)) + for i, pipe := range p { + unregs[i] = pipe.addMultiCallback(c) + } + return unregisterFuncs{f: unregs} +} + +type unregisterFuncs struct { + embedded.Registration + f []func() +} + +func (u unregisterFuncs) Unregister() error { + for _, f := range u.f { + f() + } + return nil +} + +// resolver facilitates resolving aggregate functions an instrument calls to +// aggregate measurements with while updating all pipelines that need to pull +// from those aggregations. +type resolver[N int64 | float64] struct { + inserters []*inserter[N] +} + +func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] { + in := make([]*inserter[N], len(p)) + for i := range in { + in[i] = newInserter[N](p[i], vc) + } + return resolver[N]{in} +} + +// Aggregators returns the Aggregators that must be updated by the instrument +// defined by key. +func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument +// defined by key. If boundaries were provided on instrument instantiation, those take precedence +// over boundaries provided by the reader. 
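Before the HistogramAggregators implementation that follows, a small sketch of the behaviour its doc comment describes: bucket boundaries supplied when the instrument is created take precedence over the reader's default ExplicitBucketHistogram boundaries. The api alias and its WithUnit and WithExplicitBucketBoundaries options come from go.opentelemetry.io/otel/metric, which is outside this hunk, so those exact names are assumptions.

package example

import (
	api "go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func newDurationHistogram(mp *sdkmetric.MeterProvider) (api.Float64Histogram, error) {
	meter := mp.Meter("example/http")
	// These boundaries reach the SDK through HistogramAggregators and replace
	// the reader's default buckets, unless a View overrides the aggregation.
	// WithExplicitBucketBoundaries is assumed from the otel/metric API package.
	return meter.Float64Histogram(
		"http.server.duration",
		api.WithUnit("s"),
		api.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5),
	)
}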
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + agg := i.readerDefaultAggregation(id.Kind) + if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { + histAgg.Boundaries = boundaries + agg = histAgg + } + in, err := i.Instrument(id, agg) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + if m.wrapped == nil { + return errors.New(strings.Join(m.errors, "; ")) + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go new file mode 100644 index 000000000..a82af538e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +// MeterProvider handles the creation and coordination of Meters. All Meters +// created by a MeterProvider will be associated with the same Resource, have +// the same Views applied to them, and have their produced metric telemetry +// passed to the configured Readers. +type MeterProvider struct { + embedded.MeterProvider + + pipes pipelines + meters cache[instrumentation.Scope, *meter] + + forceFlush, shutdown func(context.Context) error + stopped atomic.Bool +} + +// Compile-time check MeterProvider implements metric.MeterProvider. +var _ metric.MeterProvider = (*MeterProvider)(nil) + +// NewMeterProvider returns a new and configured MeterProvider. +// +// By default, the returned MeterProvider is configured with the default +// Resource and no Readers. Readers cannot be added after a MeterProvider is +// created. This means the returned MeterProvider, one created with no +// Readers, will perform no operations. +func NewMeterProvider(options ...Option) *MeterProvider { + conf := newConfig(options) + flush, sdown := conf.readerSignals() + + mp := &MeterProvider{ + pipes: newPipelines(conf.res, conf.readers, conf.views), + forceFlush: flush, + shutdown: sdown, + } + // Log after creation so all readers show correctly they are registered. + global.Info("MeterProvider created", + "Resource", conf.res, + "Readers", conf.readers, + "Views", len(conf.views), + ) + return mp +} + +// Meter returns a Meter with the given name and configured with options. +// +// The name should be the name of the instrumentation scope creating +// telemetry. This name may be the same as the instrumented code only if that +// code provides built-in instrumentation. +// +// Calls to the Meter method after Shutdown has been called will return Meters +// that perform no operations. +// +// This method is safe to call concurrently. 
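Before the Meter implementation below, a minimal end-to-end sketch of the provider lifecycle documented above: build a Resource, attach a reader, create an instrument, collect once, and shut down. NewManualReader, WithResource, WithReader, resource.Default and resource.Merge are all part of these same modules but not of this hunk, so the exact names are assumptions.

package main

import (
	"context"
	"log"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	ctx := context.Background()

	res, err := resource.Merge(
		resource.Default(),
		resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("opc-example")),
	)
	if err != nil {
		log.Fatal(err)
	}

	reader := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithResource(res),
		sdkmetric.WithReader(reader),
	)
	defer func() { _ = mp.Shutdown(ctx) }() // Shutdown is idempotent, as documented above.

	counter, err := mp.Meter("example").Int64Counter("requests")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}
	// rm.Resource is res, and rm.ScopeMetrics holds the "requests" Sum.
}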
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+	if name == "" {
+		global.Warn("Invalid Meter name.", "name", name)
+	}
+
+	if mp.stopped.Load() {
+		return noop.Meter{}
+	}
+
+	c := metric.NewMeterConfig(options...)
+	s := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	global.Info("Meter created",
+		"Name", s.Name,
+		"Version", s.Version,
+		"SchemaURL", s.SchemaURL,
+	)
+
+	return mp.meters.Lookup(s, func() *meter {
+		return newMeter(s, mp.pipes)
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	// Even though it may seem like there is a synchronization issue between the
+	// call to `Store` and checking `shutdown`, the Go concurrency model ensures
+	// that is not the case, as all the atomic operations executed in a program
+	// behave as though executed in some sequentially consistent order. This
+	// definition provides the same semantics as C++'s sequentially consistent
+	// atomics and Java's volatile variables.
+	// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+	mp.stopped.Store(true)
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 000000000..d94bdee75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt to register it
+// more than once occurs.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The Register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+	// register registers a Reader with a MeterProvider.
+	// The producer argument allows the Reader to signal the sdk to collect
+	// and send aggregated metric measurements.
+	register(sdkProducer)
+
+	// temporality reports the Temporality for the instrument kind provided.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	temporality(InstrumentKind) metricdata.Temporality
+
+	// aggregation returns what Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
+
+	// Collect gathers and returns all metric data related to the Reader from
+	// the SDK and stores it in rm. An error is returned if this is called
+	// after Shutdown or if rm is nil.
+	//
+	// This method needs to be concurrent safe, and the cancellation of the
+	// passed context is expected to be honored.
+	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric measurements held in an export pipeline and releases any
+	// held computational resources.
+	//
+	// The deadline or cancellation of the passed context is honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry will be flushed or all resources have been released in these
+	// situations.
+	//
+	// After Shutdown is called, calls to Collect will perform no operation and instead will return
+	// an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+	// produce returns aggregated metrics from a single collection.
+	//
+	// This method is safe to call concurrently.
+	produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
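The Producer hook described above (the interface itself is defined immediately after this aside) is how metric data the SDK did not aggregate itself gets appended to a reader's output. A hedged sketch follows; the bridgeProducer type and its single gauge are invented for illustration, while WithProducer is the option from this hunk and NewManualReader comes from the same package outside it.

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// bridgeProducer is a made-up Producer that reports a single gauge.
type bridgeProducer struct{}

func (bridgeProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
	return []metricdata.ScopeMetrics{{
		Scope: instrumentation.Scope{Name: "example/bridge"},
		Metrics: []metricdata.Metrics{{
			Name: "bridge.up",
			Data: metricdata.Gauge[int64]{
				DataPoints: []metricdata.DataPoint[int64]{{Value: 1, Time: time.Now()}},
			},
		}},
	}}, nil
}

func newBridgedReader() sdkmetric.Reader {
	// Whatever bridgeProducer returns is merged into every Collect result,
	// alongside the SDK's own aggregations.
	return sdkmetric.NewManualReader(sdkmetric.WithProducer(bridgeProducer{}))
}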
+type Producer interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Produce returns aggregated metrics from an external source. + // + // This method should be safe to call concurrently. + Produce(context.Context) ([]metricdata.ScopeMetrics, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// produceHolder is used as an atomic.Value to wrap the non-concrete producer +// type. +type produceHolder struct { + produce func(context.Context, *metricdata.ResourceMetrics) error +} + +// shutdownProducer produces an ErrReaderShutdown error always. +type shutdownProducer struct{} + +// produce returns an ErrReaderShutdown error. +func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { + return ErrReaderShutdown +} + +// TemporalitySelector selects the temporality to use based on the InstrumentKind. +type TemporalitySelector func(InstrumentKind) metricdata.Temporality + +// DefaultTemporalitySelector is the default TemporalitySelector used if +// WithTemporalitySelector is not provided. CumulativeTemporality will be used +// for all instrument kinds if this TemporalitySelector is used. +func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +// AggregationSelector selects the aggregation and the parameters to use for +// that aggregation based on the InstrumentKind. +// +// If the Aggregation returned is nil or DefaultAggregation, the selection from +// DefaultAggregationSelector will be used. +type AggregationSelector func(InstrumentKind) Aggregation + +// DefaultAggregationSelector returns the default aggregation and parameters +// that will be used to summarize measurement made from an instrument of +// InstrumentKind. This AggregationSelector using the following selection +// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum, +// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue, +// Histogram ⇨ ExplicitBucketHistogram. +func DefaultAggregationSelector(ik InstrumentKind) Aggregation { + switch ik { + case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: + return AggregationSum{} + case InstrumentKindObservableGauge, InstrumentKindGauge: + return AggregationLastValue{} + case InstrumentKindHistogram: + return AggregationExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + NoMinMax: false, + } + } + panic("unknown instrument kind") +} + +// ReaderOption is an option which can be applied to manual or Periodic +// readers. +type ReaderOption interface { + PeriodicReaderOption + ManualReaderOption +} + +// WithProducer registers producers as an external Producer of metric data +// for this Reader. +func WithProducer(p Producer) ReaderOption { + return producerOption{p: p} +} + +type producerOption struct { + p Producer +} + +// applyManual returns a manualReaderConfig with option applied. +func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.producers = append(c.producers, o.p) + return c +} + +// applyPeriodic returns a periodicReaderConfig with option applied. 
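Rounding out the reader options above (the periodic form of the producer option continues just below), a sketch of overriding the temporality and aggregation hooks that the Reader interface exposes, reusing the DefaultTemporalitySelector and DefaultAggregationSelector defined in this hunk. WithTemporalitySelector and WithAggregationSelector are reader options from the same package that are not part of this hunk, so their exact names are assumptions.

package example

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// deltaCounters keeps the cumulative default except for synchronous counters.
func deltaCounters(ik sdkmetric.InstrumentKind) metricdata.Temporality {
	if ik == sdkmetric.InstrumentKindCounter {
		return metricdata.DeltaTemporality
	}
	return sdkmetric.DefaultTemporalitySelector(ik)
}

func newTunedReader() sdkmetric.Reader {
	return sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(deltaCounters),
		// Keep the SDK defaults for everything except histograms.
		sdkmetric.WithAggregationSelector(func(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
			if ik == sdkmetric.InstrumentKindHistogram {
				return sdkmetric.AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
			}
			return sdkmetric.DefaultAggregationSelector(ik)
		}),
	)
}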
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { + c.producers = append(c.producers, o.p) + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go new file mode 100644 index 000000000..44316caa1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +// version is the current release version of the metric SDK in use. +func version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go new file mode 100644 index 000000000..cd08c6732 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/global" +) + +var ( + errMultiInst = errors.New("name replacement for multiple instruments") + errEmptyView = errors.New("no criteria provided for view") + + emptyView = func(Instrument) (Stream, bool) { return Stream{}, false } +) + +// View is an override to the default behavior of the SDK. It defines how data +// should be collected for certain instruments. It returns true and the exact +// Stream to use for matching Instruments. Otherwise, if the view does not +// match, false is returned. +type View func(Instrument) (Stream, bool) + +// NewView returns a View that applies the Stream mask for all instruments that +// match criteria. The returned View will only apply mask if all non-zero-value +// fields of criteria match the corresponding Instrument passed to the view. If +// no criteria are provided, all field of criteria are their zero-values, a +// view that matches no instruments is returned. If you need to match a +// zero-value field, create a View directly. +// +// The Name field of criteria supports wildcard pattern matching. The "*" +// wildcard is recognized as matching zero or more characters, and "?" is +// recognized as matching exactly one character. For example, a pattern of "*" +// matches all instrument names. +// +// The Stream mask only applies updates for non-zero-value fields. By default, +// the Instrument the View matches against will be use for the Name, +// Description, and Unit of the returned Stream and no Aggregation or +// AttributeFilter are set. All non-zero-value fields of mask are used instead +// of the default. If you need to zero out an Stream field returned from a +// View, create a View directly. +func NewView(criteria Instrument, mask Stream) View { + if criteria.IsEmpty() { + global.Error( + errEmptyView, "dropping view", + "mask", mask, + ) + return emptyView + } + + var matchFunc func(Instrument) bool + if strings.ContainsAny(criteria.Name, "*?") { + if mask.Name != "" { + global.Error( + errMultiInst, "dropping view", + "criteria", criteria, + "mask", mask, + ) + return emptyView + } + + // Handle branching here in NewView instead of criteria.matches so + // criteria.matches remains inlinable for the simple case. 
+ pattern := regexp.QuoteMeta(criteria.Name) + pattern = "^" + pattern + "$" + pattern = strings.ReplaceAll(pattern, `\?`, ".") + pattern = strings.ReplaceAll(pattern, `\*`, ".*") + re := regexp.MustCompile(pattern) + matchFunc = func(i Instrument) bool { + return re.MatchString(i.Name) && + criteria.matchesDescription(i) && + criteria.matchesKind(i) && + criteria.matchesUnit(i) && + criteria.matchesScope(i) + } + } else { + matchFunc = criteria.matches + } + + var agg Aggregation + if mask.Aggregation != nil { + agg = mask.Aggregation.copy() + if err := agg.err(); err != nil { + global.Error( + err, "not using aggregation with view", + "criteria", criteria, + "mask", mask, + ) + agg = nil + } + } + + return func(i Instrument) (Stream, bool) { + if matchFunc(i) { + return Stream{ + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + }, true + } + return Stream{}, false + } +} + +// nonZero returns v if it is non-zero-valued, otherwise alt. +func nonZero[T comparable](v, alt T) T { + var zero T + if v != zero { + return v + } + return alt +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 000000000..4ad864d71 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 000000000..95a61d61d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource") + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. Each of the detectors are called sequentially, in the +// order they are passed, merging the produced resource into the previous. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. 
It may also +// return a merge-conflicting Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from different detectors results +// in a schema URL conflict. It is up to the caller to determine if this +// returned Resource should be used or not. +// +// If one of the detectors returns an error that is not [ErrPartialResource], +// the resource produced by the detector will not be merged and the returned +// error will wrap that detector's error. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + r := new(Resource) + return r, detect(ctx, r, detectors) +} + +// detect runs all detectors using ctx and merges the result into res. This +// assumes res is allocated and not nil, it will panic otherwise. +// +// If the detectors or merging resources produces any errors (i.e. +// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of +// these errors will be returned. Otherwise, nil is returned. +func detect(ctx context.Context, res *Resource, detectors []Detector) error { + var ( + r *Resource + errs detectErrs + err error + ) + + for _, detector := range detectors { + if detector == nil { + continue + } + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) + } + *res = *r + } + + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 000000000..6ac1cdbf7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. 
+ host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 000000000..0d6e213d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. 
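Before the WithAttributes implementation that follows, a short sketch of the exported StringDetector defined above being combined with the built-in detectors. resource.New, WithDetectors and WithTelemetrySDK belong to this package but sit outside this hunk, and the example.host.role key plus the HOST_ROLE variable are invented for illustration.

package example

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// hostRoleKey is an invented attribute key used only for this sketch.
var hostRoleKey = attribute.Key("example.host.role")

func newResource(ctx context.Context) (*resource.Resource, error) {
	role := resource.StringDetector(
		semconv.SchemaURL,
		hostRoleKey,
		func() (string, error) { return os.Getenv("HOST_ROLE"), nil }, // HOST_ROLE is a hypothetical variable.
	)
	return resource.New(ctx,
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes from the builtin detector above.
		resource.WithDetectors(role),
	)
}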
+func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. +func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. 
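Before the remaining process options continue below, a sketch of composing the detectors exposed by these options; resource.New itself is defined outside this hunk. Picking individual process detectors instead of WithProcess avoids recording command-line arguments, which the warning above flags as potentially sensitive.

package example

import (
	"context"

	"go.opentelemetry.io/otel/sdk/resource"
)

func newProcessResource(ctx context.Context) (*resource.Resource, error) {
	return resource.New(ctx,
		resource.WithFromEnv(), // OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME
		resource.WithHost(),
		resource.WithProcessPID(),
		resource.WithProcessRuntimeName(),
		resource.WithProcessRuntimeVersion(),
	)
}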
+func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 000000000..5ecd859a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. 
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. +func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go new file mode 100644 index 000000000..64939a271 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package resource provides detecting and representing resources. +// +// The fundamental struct is a Resource which holds identifying information +// about the entities for which telemetry is exported. +// +// To automatically construct Resources from an environment a Detector +// interface is defined. Implementations of this interface can be passed to +// the Detect function to generate a Resource from the merged information. +// +// To load a user defined Resource from the environment variable +// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret +// the value as a list of comma delimited key/value pairs +// (e.g. `=,=,...`). +// +// While this package provides a stable API, +// the attributes added by resource detectors may change. +package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go new file mode 100644 index 000000000..813f05624 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. 
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { + k, v, found := strings.Cut(p, "=") + if !found { + invalid = append(invalid, p) + continue + } + key := strings.TrimSpace(k) + val, err := url.PathUnescape(strings.TrimSpace(v)) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 000000000..2d0f65498 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. 
+func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. +func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. 
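Before the detector's Detect implementation below, a sketch of how the platform readers above surface to callers: WithHostID (defined earlier in config.go) wires the hostIDDetector into resource construction and the value is exposed as the semconv host.id attribute. resource.New is assumed from the same package outside this hunk.

package example

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func printHostIdentity(ctx context.Context) {
	// On Linux this reads /etc/machine-id, falling back to
	// /var/lib/dbus/machine-id; BSD, Darwin and Windows use the readers above.
	res, err := resource.New(ctx, resource.WithHostID(), resource.WithHost())
	if err != nil {
		log.Printf("host resource: %v", err) // may wrap ErrPartialResource
	}
	if res == nil {
		return
	}
	for _, kv := range res.Attributes() {
		fmt.Println(kv.Key, kv.Value.Emit()) // e.g. host.id, host.name
	}
}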
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 000000000..cc8b8938e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 000000000..b09fde3b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 000000000..d9e5d1a8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
+ b, err := cmd.Output() + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go new file mode 100644 index 000000000..f84f17324 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux +// +build linux + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderLinux{ + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go new file mode 100644 index 000000000..6354b3560 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os" + +func readFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go new file mode 100644 index 000000000..df12c44c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// hostIDReaderUnsupported is a placeholder implementation for operating systems +// for which this project currently doesn't support host.id +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
+type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 000000000..71386e2da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 000000000..8a48ab4fa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "aix": semconv.OSTypeAIX, + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + "zos": semconv.OSTypeZOS, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 000000000..ce455dc54 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "encoding/xml" + "fmt" + "io" + "os" +) + +type plist struct { + XMLName xml.Name `xml:"plist"` + Dict dict `xml:"dict"` +} + +type dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +// osRelease builds a string describing the operating system release based on the +// contents of the property list (.plist) system files. If no .plist files are found, +// or if the required properties to build the release description string are missing, +// an empty string is returned instead. The generated string resembles the output of +// the `sw_vers` commandline program, but in a single-line string. For more information +// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. +func osRelease() string { + file, err := getPlistFile() + if err != nil { + return "" + } + + defer file.Close() + + values, err := parsePlistFile(file) + if err != nil { + return "" + } + + return buildOSRelease(values) +} + +// getPlistFile returns a *os.File pointing to one of the well-known .plist files +// available on macOS. If no file can be opened, it returns an error. +func getPlistFile() (*os.File, error) { + return getFirstAvailableFile([]string{ + "/System/Library/CoreServices/SystemVersion.plist", + "/System/Library/CoreServices/ServerVersion.plist", + }) +} + +// parsePlistFile process the file pointed by `file` as a .plist file and returns +// a map with the key-values for each pair of correlated <key> and <string> elements +// contained in it. +func parsePlistFile(file io.Reader) (map[string]string, error) { + var v plist + + err := xml.NewDecoder(file).Decode(&v) + if err != nil { + return nil, err + } + + if len(v.Dict.Key) != len(v.Dict.String) { + return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match") + } + + properties := make(map[string]string, len(v.Dict.Key)) + for i, key := range v.Dict.Key { + properties[key] = v.Dict.String[i] + } + + return properties, nil +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It tries to find the `ProductName`, `ProductVersion` +// and `ProductBuildVersion` properties. If some of these properties are not found, +// it returns an empty string.
+func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 000000000..f537e5ca5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. +func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + k, v, found := strings.Cut(line, "=") + + if !found || len(k) == 0 { + return "", "", false + } + + key := strings.TrimSpace(k) + value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. 
+func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 000000000..a6ff26a4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. 
+func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 000000000..a77742b07 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 000000000..5e3d199d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 000000000..085fe68fd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = 
defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 000000000..ad4b50df4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. It is up to the caller to determine if this returned +// Resource should be used or not. 
+// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go new file mode 100644 index 000000000..b7cede891 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk // import "go.opentelemetry.io/otel/sdk" + +// Version is the current release version of the OpenTelemetry SDK in use. +func Version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go index aa9e10171..1a820bdb3 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -309,5 +309,5 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bit size of 16 checked above. } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 000000000..2de1fc3c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 000000000..d8dc822b2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. 
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
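// Editor's sketch (hypothetical, not part of the vendored file): how the DynamoDB
// helpers in this section might decorate a client span for a Query call. The
// semconv import path (v1.26.0) is an assumption inferred from the attributes present.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "DynamoDB.Query",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	// Each helper returns an attribute.KeyValue carrying the convention key.
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBProjection("Title, Price"),
		semconv.AWSDynamoDBLimit(25),
	)
}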
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
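// Editor's sketch (hypothetical): building an OpenTelemetry resource from the ECS
// attributes above. The semconv and SDK import paths are assumptions; the example
// values are taken from the Examples listed in the doc comments.
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.AWSECSLaunchtypeFargate, // enum member defined above
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	)
	fmt.Println(res)
}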
+func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. 
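// Editor's sketch (hypothetical): the AWS Logs and Lambda helpers above produce
// plain attribute.KeyValue pairs; printing them shows the convention keys. Import
// paths are assumptions, and the values reuse the documented Examples.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
		semconv.AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}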
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
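// Editor's sketch (hypothetical): tagging a client span for an S3 upload-part call
// with the bucket/key/multipart attributes described above. Import paths are
// assumptions; example values come from the doc comments.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "S3.UploadPart",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}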
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
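// Editor's sketch (hypothetical): the browser.* and client.* helpers from the
// preceding sections return plain attribute.KeyValue pairs that can also be
// collected into an attribute.Set for lookup. Import paths are assumptions.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.ClientAddress("client.example.com"),
		semconv.ClientPort(65123),
		semconv.BrowserPlatform("macOS"),
		semconv.BrowserLanguage("en-US"),
		semconv.BrowserMobile(false),
	}
	// attribute.NewSet de-duplicates and sorts by key; values can be looked up
	// by the exported *Key constants defined above.
	set := attribute.NewSet(attrs...)
	if v, ok := set.Value(semconv.ClientAddressKey); ok {
		fmt.Println("client.address =", v.AsString())
	}
}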
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
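// Editor's sketch (hypothetical): a schemaless resource carrying the cloud.*
// attributes from this section; the provider/platform enum members used here are
// defined just below. Import paths are assumptions.
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res := resource.NewSchemaless(
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAccountID("111111111111"),
		semconv.CloudResourceID("arn:aws:lambda:us-east-1:111111111111:function:my-function"),
	)
	fmt.Println(res)
}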
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. 
It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
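// Editor's sketch (hypothetical): attaching the cloudevents.* attributes above to a
// span event when a CloudEvent is received. Import paths are assumptions; the
// values reuse the documented Examples.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "process cloudevent")
	defer span.End()

	span.AddEvent("cloudevent received", trace.WithAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.github.pull_request.opened"),
	))
}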
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
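// Editor's sketch (hypothetical): filling the code.* attributes in this section from
// the Go runtime. The semconv import path is an assumption; a fuller implementation
// would split the package qualifier out of the function name into code.namespace.
package main

import (
	"fmt"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// codeLocation reports the caller's location using the code.* conventions.
func codeLocation() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return nil
	}
	return []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
		// Fully qualified name, e.g. "main.main"; the convention prefers only
		// the rightmost part here, with the qualifier in code.namespace.
		semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
	}
}

func main() {
	for _, kv := range codeLocation() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}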
+func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
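// Editor's sketch (hypothetical): describing a container with the container.*
// resource attributes in this section; the helper functions used here are defined
// just below. Import paths are assumptions and the values reuse the documented Examples.
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
		semconv.ContainerRuntime("containerd"),
	)
	for _, kv := range res.Attributes() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}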
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +var ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. 
It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // DBClientConnectionsPoolNameKey is the attribute Key conforming to the + // "db.client.connections.pool.name" semantic conventions. It represents + // the name of the connection pool; unique within the instrumented + // application. In case the connection pool implementation doesn't provide + // a name, instrumentation should use a combination of `server.address` and + // `server.port` attributes formatted as `server.address:server.port`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myDataSource' + DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") + + // DBClientConnectionsStateKey is the attribute Key conforming to the + // "db.client.connections.state" semantic conventions. It represents the + // state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle' + DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match + // the value provided in the query and may be qualified with the schema and + // database name. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" + // semantic conventions. It represents the name of the database, fully + // qualified within the server address and port. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they + // SHOULD be concatenated (potentially using database system specific + // conventions) from most general to most specific namespace component, and + // more specific namespaces SHOULD NOT be captured without the more general + // namespaces, to ensure that "startswith" queries for the more general + // namespaces will be valid. 
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. 
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the
+ // `device.app.lifecycle` event definition including `android.state` as a
+ // payload field instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // and from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It represents the describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. 
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of
+// event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. 
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+ // identifier for a value. If one is unavailable, a stringified version of
+ // the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions.
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the hTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. 
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents whether the
+ // thread is a daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the thread
+// is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
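For orientation, the helpers above are thin constructors over attribute.KeyValue; a typical consumer attaches them to an SDK resource. The sketch below is illustrative rather than part of the vendored file: it assumes this package is imported as semconv from the v1.26.0 semantic-conventions module (the exact vendored path is not visible in this hunk), and the values are taken from the example strings in the doc comments.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path/version
)

func main() {
	// Describe where a workload runs using the k8s.* helpers defined above.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
		semconv.K8SContainerRestartCount(2),
	)
	fmt.Println(res.Attributes())
}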
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
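As a hedged illustration of the generic messaging attributes (not something contained in the vendored file), a producer-side instrumentation might start its span with the messaging.* helpers and enum values defined above. The tracer scope name, topic handling, and the semconv import version below are assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path/version
	"go.opentelemetry.io/otel/trace"
)

// startPublishSpan starts a producer span annotated with the generic
// messaging attributes; the caller is expected to call span.End().
func startPublishSpan(ctx context.Context, topic string, batchSize int) (context.Context, trace.Span) {
	tracer := otel.Tracer("example/messaging") // illustrative scope name
	return tracer.Start(ctx, topic+" publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,
			semconv.MessagingOperationTypePublish,
			semconv.MessagingOperationName("send"),
			semconv.MessagingDestinationName(topic),
			semconv.MessagingBatchMessageCount(batchSize),
		),
	)
}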
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. 
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
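Again as a sketch rather than anything in the vendored file, the GCP Pub/Sub-specific helpers are typically combined with the generic messaging attributes on an existing consumer span; the import path, ack-deadline value, and function shape here are assumptions.

package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed import path/version
	"go.opentelemetry.io/otel/trace"
)

// recordPubsubReceive enriches the span already carried in ctx with
// Pub/Sub-specific delivery details.
func recordPubsubReceive(ctx context.Context, ackID string, deliveryAttempt int) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.MessagingSystemGCPPubsub,
		semconv.MessagingOperationTypeReceive,
		semconv.MessagingGCPPubsubMessageAckID(ackID),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(deliveryAttempt),
		semconv.MessagingGCPPubsubMessageAckDeadline(10), // illustrative deadline in seconds
	)
}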
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It describes the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It specifies + // whether the context switches for this data point were + // voluntary or involuntary. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. 
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents the whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the mUST be calculated +// as two different counters starting from `1` one for sent messages and one +// for received message. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating an UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. 
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. 
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. 
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the also called +// an SNI, this tells the server which hostname to which the client is +// attempting to connect to. 
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, is the last + // part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 000000000..d031bbea7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 000000000..bfaee0d56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 000000000..fcdb9f485 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the deprecated, use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // deprecated, use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Stable + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. 
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the cPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. 
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. 
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. 
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 000000000..4c87c7adc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 000000000..ef85cb70c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. 
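The Tracer method documented in the new provider.go is typically called once per instrumentation library, keyed by that library's package path. A minimal sketch of the pattern under those assumptions (the package path and version string are illustrative):

package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// newTracer returns a Tracer scoped to a hypothetical instrumentation
// library, following the naming guidance in the TracerProvider docs.
func newTracer(tp trace.TracerProvider) trace.Tracer {
	if tp == nil {
		// Fall back to the globally registered provider.
		tp = otel.GetTracerProvider()
	}
	return tp.Tracer(
		"github.com/example/mylib", // illustrative: Go package path of the instrumenting library
		trace.WithInstrumentationVersion("0.1.0"),
	)
}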
+ Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. 
+ TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. 
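Putting the Span, Link, and SpanKind pieces from the new span.go together, a hedged usage sketch — the tracer, span name, batch context, and do callback are illustrative, not taken from this diff:

package example

import (
	"context"

	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// callBackend shows the documented Span lifecycle: start with a kind and a
// link to another trace, record an error, set the status, and always End.
func callBackend(ctx, batchCtx context.Context, tracer trace.Tracer, do func(context.Context) error) error {
	ctx, span := tracer.Start(ctx, "callBackend",
		trace.WithSpanKind(trace.SpanKindClient),
		// Link this span to the batch it belongs to (see Link above).
		trace.WithLinks(trace.LinkFromContext(batchCtx)),
	)
	defer span.End()

	if err := do(ctx); err != nil {
		span.RecordError(err)                    // records an exception event only
		span.SetStatus(codes.Error, err.Error()) // RecordError does not change the status
		return err
	}
	return nil
}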
+func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab..d49adf671 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. 
- SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. 
A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. 
See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 000000000..77952d2a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. 
+ // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf243..dc5e34cad 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 000000000..c9b7cdbbf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ab2896052..f67039ed1 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
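The new TraceState.Walk method added in tracestate.go above gives callers an early-exit iterator over the key/value list. A minimal sketch of using it on an incoming span context:

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// dumpTraceState prints every vendor key/value pair carried in the span
// context's tracestate; returning false from the callback stops iteration.
func dumpTraceState(sc trace.SpanContext) {
	sc.TraceState().Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		return true // keep iterating
	})
}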
func Version() string { - return "1.28.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82a..3ba611d71 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.29.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,15 +29,16 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.51.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.5.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: @@ -46,4 +47,3 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go index d40b622a4..9202510d2 100644 --- a/vendor/go.step.sm/crypto/pemutil/pem.go +++ b/vendor/go.step.sm/crypto/pemutil/pem.go @@ -640,7 +640,6 @@ func Serialize(in interface{}, opts ...Options) (*pem.Block, error) { } } else { var err error - //nolint:staticcheck // required for legacy compatibility p, err = x509.EncryptPEMBlock(rand.Reader, p.Type, p.Bytes, password, DefaultEncCipher) if err != nil { return nil, errors.Wrap(err, "failed to serialize to PEM") diff --git a/vendor/go.step.sm/crypto/pemutil/pkcs8.go b/vendor/go.step.sm/crypto/pemutil/pkcs8.go index 35d30db93..fb6c96c29 100644 --- a/vendor/go.step.sm/crypto/pemutil/pkcs8.go +++ b/vendor/go.step.sm/crypto/pemutil/pkcs8.go @@ -174,7 +174,6 @@ func (c rfc1423Algo) deriveKey(password, salt []byte, h func() hash.Hash) []byte // key derived using PBKDF2 over the given password. func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - //nolint:staticcheck // required for legacy compatibility return x509.DecryptPEMBlock(block, password) } diff --git a/vendor/goa.design/goa/v3/http/error.go b/vendor/goa.design/goa/v3/http/error.go index 07da10219..409f2b3dd 100644 --- a/vendor/goa.design/goa/v3/http/error.go +++ b/vendor/goa.design/goa/v3/http/error.go @@ -55,7 +55,7 @@ func NewErrorResponse(ctx context.Context, err error) Statuser { Fault: gerr.Fault, } } - return NewErrorResponse(ctx, goa.Fault(err.Error())) + return NewErrorResponse(ctx, goa.Fault("%s", err.Error())) } func (resp *ErrorResponse) MarshalXML(e *xml.Encoder, _ xml.StartElement) error { diff --git a/vendor/goa.design/goa/v3/http/server.go b/vendor/goa.design/goa/v3/http/server.go index 396928e72..2bfc843f8 100644 --- a/vendor/goa.design/goa/v3/http/server.go +++ b/vendor/goa.design/goa/v3/http/server.go @@ -2,8 +2,6 @@ package http import ( "net/http" - "net/url" - "strings" ) type ( @@ -38,32 +36,3 @@ func (s Servers) Mount(mux Muxer) { m.Mount(mux) } } - -// Replace returns a handler that serves HTTP requests by replacing the -// request URL's Path (and RawPath if set) and invoking the handler h. -// The logic is the same as the standard http package StripPrefix function. 
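The goa changes above route err.Error() and msg through an explicit "%s" instead of passing them as the format string. A small sketch of the hazard that avoids (the message text is illustrative):

package main

import "fmt"

func main() {
	msg := "unexpected token %q in payload" // message that happens to contain printf verbs

	// Treating the message as a format string garbles it: "%!q(MISSING)".
	fmt.Println(fmt.Errorf(msg))

	// Passing it through "%s" preserves it verbatim, which is what the
	// goa.Fault("%s", ...) and PermanentError(..., "%s", msg) changes do.
	fmt.Println(fmt.Errorf("%s", msg))
}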
-func Replace(old, nw string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var p, rp string - if old != "" { - p = strings.Replace(r.URL.Path, old, nw, 1) - rp = strings.Replace(r.URL.RawPath, old, nw, 1) - } else { - p = nw - if r.URL.RawPath != "" { - rp = nw - } - } - if p != r.URL.Path && (r.URL.RawPath == "" || rp != r.URL.RawPath) { - r2 := new(http.Request) - *r2 = *r - r2.URL = new(url.URL) - *r2.URL = *r.URL - r2.URL.Path = p - r2.URL.RawPath = rp - h.ServeHTTP(w, r2) - } else { - http.NotFound(w, r) - } - }) -} diff --git a/vendor/goa.design/goa/v3/pkg/error.go b/vendor/goa.design/goa/v3/pkg/error.go index d24dd1083..7c1156142 100644 --- a/vendor/goa.design/goa/v3/pkg/error.go +++ b/vendor/goa.design/goa/v3/pkg/error.go @@ -116,7 +116,7 @@ func MissingPayloadError() error { // DecodePayloadError is the error produced by the generated code when a request // body cannot be decoded successfully. func DecodePayloadError(msg string) error { - return PermanentError("decode_payload", msg) + return PermanentError("decode_payload", "%s", msg) } // UnsupportedMediaTypeError is the error produced by the Goa decoder when the @@ -162,7 +162,7 @@ func InvalidFormatError(name, target string, format Format, formatError error) e // InvalidPatternError is the error produced by the generated code when the // value of a payload field does not match the pattern validation defined in the // design. -func InvalidPatternError(name, target string, pattern string) error { +func InvalidPatternError(name, target, pattern string) error { return withField(name, PermanentError( InvalidPattern, "%s must match the regexp %q but got value %q", name, pattern, target)) } @@ -170,7 +170,7 @@ func InvalidPatternError(name, target string, pattern string) error { // InvalidRangeError is the error produced by the generated code when the value // of a payload field does not match the range validation defined in the design. // value may be an int or a float64. -func InvalidRangeError(name string, target any, value any, min bool) error { +func InvalidRangeError(name string, target, value any, min bool) error { comp := "greater or equal" if !min { comp = "lesser or equal" diff --git a/vendor/goa.design/goa/v3/pkg/version.go b/vendor/goa.design/goa/v3/pkg/version.go index fbc21c503..3c8bef311 100644 --- a/vendor/goa.design/goa/v3/pkg/version.go +++ b/vendor/goa.design/goa/v3/pkg/version.go @@ -10,9 +10,9 @@ const ( // Major version number Major = 3 // Minor version number - Minor = 18 + Minor = 19 // Build number - Build = 2 + Build = 1 // Suffix - set to empty string in release tag commits. Suffix = "" ) diff --git a/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go b/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go index 45d28e88c..4a20fff2b 100644 --- a/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go +++ b/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go @@ -136,7 +136,9 @@ const Scheme = "kafka" // URLOpener opens Kafka URLs like "kafka://mytopic" for topics and // "kafka://group?topic=mytopic" for subscriptions. // -// For topics, the URL's host+path is used as the topic name. +// For topics, the URL's host+path is used as the topic name, +// and the "key_name" query parameter is used to extract the routing key +// from metadata. // // For subscriptions, the URL's host+path is used as the group name, // and the "topic" query parameter(s) are used as the set of topics to @@ -159,9 +161,19 @@ type URLOpener struct { // OpenTopicURL opens a pubsub.Topic based on u. 
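The kafkapubsub change above teaches the topic URL opener a "key_name" query parameter, which names the message-metadata key used as the Kafka partition key. A hedged usage sketch — the topic name "orders" and key "user_id" are illustrative, and the default opener reads brokers from the KAFKA_BROKERS environment variable:

package main

import (
	"context"
	"log"

	"gocloud.dev/pubsub"
	_ "gocloud.dev/pubsub/kafkapubsub" // registers the kafka:// URL scheme
)

func main() {
	ctx := context.Background()

	// key_name=user_id tells the opener to take the partition key from the
	// "user_id" metadata entry of each published message.
	topic, err := pubsub.OpenTopic(ctx, "kafka://orders?key_name=user_id")
	if err != nil {
		log.Fatal(err)
	}
	defer topic.Shutdown(ctx)

	err = topic.Send(ctx, &pubsub.Message{
		Body:     []byte("order created"),
		Metadata: map[string]string{"user_id": "1234"},
	})
	if err != nil {
		log.Fatal(err)
	}
}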
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) { - for param := range u.Query() { - return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + for param, value := range u.Query() { + switch param { + case "key_name": + if len(value) != 1 || len(value[0]) == 0 { + return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + } + + o.TopicOptions.KeyName = value[0] + default: + return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + } } + topicName := path.Join(u.Host, u.Path) return OpenTopic(o.Brokers, o.Config, topicName, &o.TopicOptions) } diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713accac..c3895478e 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 
8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, 
X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + 
MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + 
PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, 
X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, 
X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), 
X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 
+ PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, 
X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ 
X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + 
PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + 
PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, 
X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - 
BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 PXOR X1, X0 PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 PXOR X1, X0 PXOR X2, X0 PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 9ae8206c2..f75162e03 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -1,722 +1,4517 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. 
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 
BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU 
·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 
+ VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE 
$0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE 
$0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE 
$0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + 
VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + 
BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE 
$0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) VZEROUPPER - RET -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - 
VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - 
VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 VMOVDQA X0, X8 VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 VMOVDQU 16(AX), X11 VMOVDQU 32(AX), X2 VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + BYTE $0xf9 + BYTE $0x01 VMOVDQA X10, X0 VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 VMOVDQA X12, 16(R10) VMOVDQA X13, 32(R10) VMOVDQA X14, 48(R10) VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 
+ VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 VMOVDQA X12, 80(R10) VMOVDQA X13, 96(R10) VMOVDQA X14, 112(R10) VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 VMOVDQA X12, 144(R10) VMOVDQA X13, 160(R10) VMOVDQA X14, 
176(R10) VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 VMOVDQA X12, 208(R10) VMOVDQA X13, 224(R10) VMOVDQA X14, 240(R10) VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, 
X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, 
X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + 
VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, 
X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, 
X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ 
X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + 
VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + 
VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR 
X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE 
$0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + 
BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff VMOVDQU 32(AX), X14 VMOVDQU 48(AX), X15 VPXOR X0, X10, X10 @@ -729,16 +4524,36 @@ noinc: VPXOR X7, X15, X3 VMOVDQU X2, 32(AX) VMOVDQU X3, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + VMOVDQU X10, (AX) + VMOVDQU X11, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VZEROUPPER + RET - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16 - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) +DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER +DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16 - RET +DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index adfac00c1..9a0ce2124 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -1,278 +1,1441 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT. 
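For orientation when reading the generated rounds above and the regenerated SSE4 file that follows: the AVX_iv0..AVX_iv3 constants are the standard BLAKE2b initialization vector (the SHA-512 initial hash values), and AVX_c40/AVX_c48 are byte-shuffle tables for the 24- and 16-bit right rotations (left rotations by 40 and 48, hence the names); the 32-bit rotation is done with PSHUFD/VPSHUFD $0xb1 and the 63-bit rotation with the add, shift-right-by-63, xor triple. Below is a minimal scalar sketch, not part of the vendored diff, of the BLAKE2b G mixing step that these instruction sequences vectorize; the names g, m0 and m1 are illustrative only.

    package blake2bsketch

    import "math/bits"

    // g mixes one column or diagonal of the BLAKE2b state with two message
    // words, using the rotation amounts 32, 24, 16 and 63 seen in the
    // generated assembly above.
    func g(a, b, c, d, m0, m1 uint64) (uint64, uint64, uint64, uint64) {
        a += b + m0
        d = bits.RotateLeft64(d^a, -32)
        c += d
        b = bits.RotateLeft64(b^c, -24)
        a += b + m1
        d = bits.RotateLeft64(d^a, -16)
        c += d
        b = bits.RotateLeft64(b^c, -63)
        return a, b, c, d
    }
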
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 +// Requires: SSE2, SSE4.1, SSSE3 +TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + MOVOU ·iv3<>+0(SB), X0 + MOVO X0, (R10) + XORQ CX, (R10) + MOVOU ·c40<>+0(SB), X13 + MOVOU ·c48<>+0(SB), X14 + MOVOU (AX), X12 MOVOU 16(AX), X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + 
ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 
14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ $0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO 
X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU 
X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + 
PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, 
X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR 
X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + 
PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET - HALF_ROUND(X0, X1, X2, 
X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 - RET +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 731d2ac6d..fd5ee845f 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -1,2715 +1,9762 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. +// Code generated by command: go run chacha20poly1305_amd64_asm.go -out ../chacha20poly1305_amd64.s -pkg chacha20poly1305. DO NOT EDIT. 
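The regenerated chacha20poly1305_amd64.s below expands inline what the removed hand-written macros (chachaQR, polyAdd/polyMul and the PALIGNR byte sequences) used to express: several interleaved ChaCha20 states kept in SSE/AVX2 registers with Poly1305 limb arithmetic folded between the vector instructions, and a one-time Poly1305 key derived by clamping the first 16 bytes of the first ChaCha20 block with polyClampMask. As a reference only, and not part of the vendored diff, here is a minimal scalar sketch in Go of those two building blocks; quarterRound and clampR are illustrative names.

    package chachasketch

    import "math/bits"

    // quarterRound is the ChaCha20 quarter round; the assembly realizes the
    // 16- and 8-bit rotations with the rol16/rol8 PSHUFB tables and the 12-
    // and 7-bit rotations with paired shifts and xors.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
        a += b
        d = bits.RotateLeft32(d^a, 16)
        c += d
        b = bits.RotateLeft32(b^c, 12)
        a += b
        d = bits.RotateLeft32(d^a, 8)
        c += d
        b = bits.RotateLeft32(b^c, 7)
        return a, b, c, d
    }

    // clampR applies the Poly1305 clamp encoded by polyClampMask to r, the
    // low 16 bytes of the 32-byte one-time key, as two little-endian limbs.
    func clampR(r0, r1 uint64) (uint64, uint64) {
        return r0 & 0x0ffffffc0fffffff, r1 & 0x0ffffffc0ffffffc
    }
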
//go:build gc && !purego #include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0x28(SB)/8, 
$0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR 
$12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 - -// Some macros - -// ROL rotates the uint32s in register R left by N bits, using temporary T. -#define ROL(N, R, T) \ - MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R - -// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL16(R, T) PSHUFB ·rol16<>(SB), R -#else -#define ROL16(R, T) ROL(16, R, T) -#endif - -// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL8(R, T) PSHUFB ·rol8<>(SB), R -#else -#define ROL8(R, T) ROL(8, R, T) -#endif - -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; ROL16(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; ROL8(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- + +// func polyHashADInternal<>() TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 - polyMul + // Hack: 
Must declare #define macros inside of a function due to Avo constraints + // ROL rotates the uint32s in register R left by N bits, using temporary T. + #define ROL(N, R, T) \ + MOVO R, T; \ + PSLLL $(N), T; \ + PSRLL $(32-(N)), R; \ + PXOR T, R + + // ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL8(R, T) PSHUFB ·rol8<>(SB), R + #else + #define ROL8(R, T) ROL(8, R, T) + #endif + + // ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL16(R, T) PSHUFB ·rol16<>(SB), R + #else + #define ROL16(R, T) ROL(16, R, T) + #endif + XORQ R10, R10 + XORQ R11, R11 + XORQ R12, R12 + CMPQ R9, $0x0d + JNE hashADLoop + MOVQ (CX), R10 + MOVQ 5(CX), R11 + SHRQ $0x18, R11 + MOVQ $0x00000001, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 RET hashADLoop: // Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop + CMPQ R9, $0x10 + JB hashADTail + ADDQ (CX), R10 + ADCQ 8(CX), R11 + ADCQ $0x01, R12 + LEAQ 16(CX), CX + SUBQ $0x10, R9 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP hashADLoop hashADTail: - CMPQ itr2, $0 + CMPQ R9, $0x00 JE hashADDone // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp + XORQ R13, R13 + XORQ R14, R14 + XORQ R15, R15 + ADDQ R9, CX hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD + SHLQ $0x08, R13, R14 + SHLQ $0x08, R13 + MOVB -1(CX), R15 + XORQ R15, R13 + DECQ CX + DECQ R9 + JNE hashADTailLoop + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + hashADDone: RET -// 
---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 +// func chacha20Poly1305Open(dst []byte, key []uint32, src []byte, ad []byte) bool +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Open(SB), $288-97 // For aligned stack access MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX // Check for AVX2 support - CMPB ·useAVX2(SB), $1 + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Open_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster + CMPQ BX, $0x80 + JBE openSSE128 // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X9, X13 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 + MOVO X3, 32(BP) + MOVO X6, 48(BP) + MOVO X9, 128(BP) + MOVQ $0x0000000a, R9 openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + DECQ R9 + JNE openSSEPreparePolyKey // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSEMainLoop: - CMPQ inl, $256 + CMPQ BX, $0x00000100 JB openSSEMainLoopDone // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL 
·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash + // 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $0x00000004, CX + MOVQ SI, R9 openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE 
$0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(R9), R9 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + 
BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ CX + JGE openSSEInternalLoop + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + CMPQ CX, $-6 + JG openSSEInternalLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl + MOVO X15, 64(BP) + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU X0, (DI) + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU X3, 16(DI) + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU X6, 32(DI) + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X9, 48(DI) + MOVOU 64(SI), X9 + PXOR X9, X1 + MOVOU X1, 64(DI) + MOVOU 80(SI), X9 + PXOR X9, X4 + MOVOU X4, 80(DI) + MOVOU 96(SI), X9 + PXOR X9, X7 + MOVOU X7, 96(DI) + MOVOU 112(SI), X9 + PXOR X9, X10 + MOVOU X10, 112(DI) + MOVOU 128(SI), X9 + PXOR X9, X2 + MOVOU X2, 128(DI) + MOVOU 144(SI), X9 + PXOR X9, X5 + MOVOU X5, 144(DI) + MOVOU 160(SI), X9 + PXOR X9, X8 + MOVOU X8, 160(DI) + MOVOU 176(SI), X9 + PXOR X9, X11 + MOVOU X11, 176(DI) + MOVOU 192(SI), X9 + PXOR X9, X12 + MOVOU X12, 192(DI) + MOVOU 208(SI), X9 + PXOR X9, X13 + MOVOU X13, 208(DI) + MOVOU 
224(SI), X9 + PXOR X9, X14 + MOVOU X14, 224(DI) + MOVOU 240(SI), X9 + PXOR 64(BP), X9 + MOVOU X9, 240(DI) + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openSSEMainLoop openSSEMainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $64 + CMPQ BX, $0x40 JBE openSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openSSETail128 - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openSSETail192 JMP openSSETail256 openSSEFinalize: // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally, constant time compare to the tag at the end of the message XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 + MOVQ $0x00000001, DX + XORQ (SI), R10 + XORQ 8(SI), R11 + ORQ R11, R10 CMOVQEQ DX, AX // Return true iff tags are equal MOVB AX, ret+96(FP) RET -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE openSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 
+ PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSE128Open: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail16 - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0(inp)) + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP openSSE128Open openSSETail16: - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVOU (SI), X12 + ADDQ BX, SI + PAND -16(R13)(R9*1), X12 + MOVO X12, 64(BP) + MOVQ X12, R13 + MOVQ 72(BP), R14 + PXOR X1, X12 // We can only store one byte at a time, since plaintext can be shorter than 16 bytes openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl + MOVQ X12, R8 + MOVB R8, (DI) + PSRLDQ $0x01, X12 + INCQ DI + DECQ BX JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 JMP openSSEFinalize -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, 
ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + XORQ R9, R9 + MOVQ BX, CX + CMPQ CX, $0x10 + JB openSSETail64LoopB openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + CMPQ CX, $0x10 + JAE openSSETail64LoopA + CMPQ R9, $0xa0 + JNE openSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + PADDL 48(BP), X6 + PADDL 80(BP), X9 openSSETail64DecLoop: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 + SUBQ $0x10, BX + MOVOU (SI), X12 + PXOR X12, X0 + MOVOU X0, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVO X3, X0 + MOVO X6, X3 + MOVO X9, X6 JMP openSSETail64DecLoop openSSETail64DecLoopDone: - MOVO A0, A1 + MOVO X0, X1 JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL 
·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 96(BP) + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSETail128LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + CMPQ R9, CX + JB openSSETail128LoopA + CMPQ R9, $0xa0 + JNE openSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 96(BP), X9 + PADDL 80(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + SUBQ $0x40, BX + LEAQ 64(SI), SI + LEAQ 64(DI), DI + JMP openSSETail64DecLoop + openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 + MOVO ·chacha20Constants<>+0(SB), X2 + MOVO 32(BP), X5 + MOVO 48(BP), X8 + MOVO 128(BP), X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 80(BP) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 112(BP) + MOVQ BX, CX + MOVQ $0x000000a0, R9 + CMPQ CX, $0xa0 + CMOVQGT R9, CX + ANDQ $-16, CX + XORQ R9, R9 openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; 
shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c 
+ BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + CMPQ R9, CX + JB openSSLTail192LoopA + CMPQ R9, $0xa0 + JNE openSSLTail192LoopB + CMPQ BX, $0xb0 + JB openSSLTail192Store + ADDQ 160(SI), R10 + ADCQ 168(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + CMPQ BX, $0xc0 + JB openSSLTail192Store + ADDQ 176(SI), R10 + ADCQ 184(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 112(BP), X9 + PADDL 96(BP), X10 + PADDL 80(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X2 + PXOR X13, X5 + PXOR X14, X8 + PXOR X15, X11 + MOVOU X2, (DI) + MOVOU X5, 16(DI) + MOVOU X8, 32(DI) + MOVOU X11, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + LEAQ 128(DI), DI + JMP openSSETail64DecLoop + openSSETail256: - // Need to decrypt up to 256 
bytes - prepare four blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - XORQ itr2, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + XORQ R9, R9 openSSETail256Loop: - // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication - polyAdd(0(inp)(itr2*1)) - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulStage3 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - ADDQ $2*8, itr2 - CMPQ itr2, $160 - JB openSSETail256Loop - MOVQ inl, itr1 - ANDQ $-16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + ADDQ $0x10, R9 + CMPQ R9, $0xa0 + JB openSSETail256Loop + MOVQ BX, CX + ANDQ $-16, CX openSSETail256HashLoop: - polyAdd(0(inp)(itr2*1)) - polyMul - ADDQ $2*8, itr2 - CMPQ itr2, itr1 - JB openSSETail256HashLoop + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, R9 + CMPQ R9, CX + JB openSSETail256HashLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - LEAQ 192(inp), inp - LEAQ 192(oup), oup - SUBQ $192, inl - MOVO A3, A0 - MOVO B3, B0 - MOVO C3, C0 - MOVO tmpStore, D0 - - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 
144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + LEAQ 192(SI), SI + LEAQ 192(DI), DI + SUBQ $0xc0, BX + MOVO X12, X0 + MOVO X13, X3 + MOVO X14, X6 + MOVO 64(BP), X9 + JMP openSSETail64DecLoop + chacha20Poly1305Open_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimization, for very short buffers - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openAVX2192 - CMPQ inl, $320 + CMPQ BX, $0x00000140 JBE openAVX2320 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, state2StoreAVX2 - VMOVDQA DD0, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, 64(BP) + VMOVDQA Y4, 192(BP) + MOVQ $0x0000000a, R9 openAVX2PreparePolyKey: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - DECQ itr2 - JNE openAVX2PreparePolyKey - - VPADDD ·chacha20Constants<>(SB), AA0, AA0 - VPADDD state1StoreAVX2, BB0, BB0 - VPADDD state2StoreAVX2, CC0, CC0 - VPADDD ctr3StoreAVX2, DD0, DD0 - - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ R9 + JNE openAVX2PreparePolyKey + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD 32(BP), Y14, Y14 + VPADDD 64(BP), Y12, Y12 + VPADDD 192(BP), Y4, Y4 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for the first 64 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 // Hash AD + first 64 bytes - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX openAVX2InitialHash64: - polyAdd(0(inp)(itr1*1)) - polyMulAVX2 - ADDQ $16, itr1 - 
CMPQ itr1, $64 - JNE openAVX2InitialHash64 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, CX + CMPQ CX, $0x40 + JNE openAVX2InitialHash64 // Decrypt the first 64 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), BB0, BB0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU BB0, (1*32)(oup) - LEAQ (2*32)(inp), inp - LEAQ (2*32)(oup), oup - SUBQ $64, inl + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VMOVDQU Y0, (DI) + VMOVDQU Y14, 32(DI) + LEAQ 64(SI), SI + LEAQ 64(DI), DI + SUBQ $0x40, BX openAVX2MainLoop: - CMPQ inl, $512 + CMPQ BX, $0x00000200 JB openAVX2MainLoopDone // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX openAVX2InternalLoop: - // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications - // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext - polyAdd(0*8(inp)(itr1*1)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR 
AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(inp)(itr1*1)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(inp)(itr1*1)) - LEAQ (6*8)(itr1), itr1 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - CMPQ itr1, $480 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB 
·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(SI)(CX*1), R10 + ADCQ 24(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(SI)(CX*1), R10 + ADCQ 40(SI)(CX*1), R11 + ADCQ $0x01, R12 + LEAQ 48(CX), CX + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD 
$0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + CMPQ CX, $0x000001e0 JNE openAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(480(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; 
VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ 480(SI), R10 + ADCQ 488(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(496(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - LEAQ (32*16)(oup), oup - SUBQ $(32*16), inl + ADDQ 496(SI), R10 + ADCQ 504(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + LEAQ 512(DI), DI + SUBQ $0x00000200, BX JMP openAVX2MainLoop 
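The removed comments in the old macro-based main loop note that it interleaves two ChaCha quarter rounds with Poly1305 block multiplications, hashing 480 of every 512 bytes inside the loop and the remaining 32 just after it; the regenerated assembly above unrolls the same interleaving into explicit VPADDD/VPXOR/VPSHUFB/VPSLLD/VPSRLD and ADDQ/ADCQ/MULXQ sequences. As orientation only, here is a minimal plain-Go sketch of the two primitives those sequences compute per 32-bit lane and per 16-byte block. The function names are illustrative and not part of the package API, and the Poly1305 step uses math/big for clarity instead of the three 64-bit limbs (R10/R11/R12) the assembly keeps.

```go
package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

// quarterRound is the scalar ChaCha20 quarter round. The AVX2 loop performs
// the same add/xor/rotate sequence on several states at once: VPSHUFB with
// the rol16/rol8 tables for the 16- and 8-bit rotates, and VPSLLD/VPSRLD
// pairs (12/20 and 7/25) for the 12- and 7-bit rotates.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

// poly1305Block folds one 16-byte block into the accumulator:
//   acc = ((acc + block || 0x01) * r) mod 2^130 - 5
// which is what each ADDQ/ADCQ/MULXQ/IMULQ run followed by the SHRQ/ANDQ
// reduction computes on 64-bit limbs.
func poly1305Block(acc, r, p *big.Int, block []byte) *big.Int {
	n := new(big.Int).SetBytes(reverse(block)) // Poly1305 reads blocks little-endian
	n.SetBit(n, 8*len(block), 1)               // append the high 0x01 byte
	acc.Add(acc, n)
	acc.Mul(acc, r)
	return acc.Mod(acc, p)
}

func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	// Quarter round on the first column of a ChaCha20 state.
	a, b, c, d := quarterRound(0x61707865, 0x03020100, 0x13121110, 0x00000001)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)

	// One Poly1305 block with an arbitrary illustrative key half r,
	// clamped the same way the polyClampMask constant clamps it.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	clamp, _ := new(big.Int).SetString("0ffffffc0ffffffc0ffffffc0fffffff", 16)
	r := new(big.Int).SetBytes(reverse([]byte{
		0x85, 0xd6, 0xbe, 0x78, 0x57, 0x55, 0x6d, 0x33,
		0x7f, 0x44, 0x52, 0xfe, 0x42, 0xd5, 0x06, 0xa8,
	}))
	r.And(r, clamp)

	acc := big.NewInt(0)
	msg := []byte("sixteen byte blk")
	acc = poly1305Block(acc, r, p, msg)
	fmt.Printf("acc = %x\n", acc)
}
```

This is only a sketch of the arithmetic; the generated assembly additionally batches four counter blocks per iteration and staggers the Poly1305 multiplications between vector instructions to hide their latency.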
openAVX2MainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openAVX2Tail128 - CMPQ inl, $256 + CMPQ BX, $0x00000100 JBE openAVX2Tail256 - CMPQ inl, $384 + CMPQ BX, $0x00000180 JBE openAVX2Tail384 JMP openAVX2Tail512 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes openAVX2192: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 openAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE openAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD 
TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 openAVX2ShortOpen: // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openAVX2ShortOpenLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - polyAdd(2*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(SI), R10 + ADCQ 24(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP openAVX2ShortOpenLoop openAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 
+ ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 openAVX2ShortDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes openAVX2320: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 openAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, 
Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE openAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP openAVX2ShortOpen -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openAVX2Tail128: // Need to decrypt up to 128 bytes - prepare two blocks - VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD1 - VMOVDQA DD1, DD0 - - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - TESTQ itr1, itr1 - JE openAVX2Tail128LoopB + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y1 + 
VMOVDQA Y1, Y4 + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + TESTQ CX, CX + JE openAVX2Tail128LoopB openAVX2Tail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMulAVX2 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openAVX2Tail128LoopB: - ADDQ $16, itr2 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail128LoopA - CMPQ itr2, $160 - JNE openAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC1, CC1 - VPADDD DD0, DD1, DD1 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + ADDQ $0x10, R9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail128LoopA + CMPQ R9, $0xa0 + JNE openAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y13, Y13 + VPADDD Y4, Y1, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 openAVX2TailLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2Tail - SUBQ $32, inl + SUBQ $0x20, BX // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 JMP openAVX2TailLoop openAVX2Tail: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2TailDone - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, 
X1 openAVX2TailDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext openAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare four blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 // Compute the number of iterations that will hash data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $128, itr1 - SHRQ $4, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x80, CX + SHRQ $0x04, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 openAVX2Tail256LoopA: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX - // Perform ChaCha rounds, while hashing the remaining input openAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD 
$0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX JB openAVX2Tail256LoopA + CMPQ R9, $0x0a + JNE openAVX2Tail256LoopB + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX - CMPQ itr2, $10 - JNE openAVX2Tail256LoopB - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - - // Hash the remainder of data (if any) openAVX2Tail256Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail256HashEnd - polyAdd (0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail256Hash - -// Store 128 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail256HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail256Hash + openAVX2Tail256HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - - VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 - VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) - LEAQ (4*32)(inp), inp - LEAQ (4*32)(oup), oup - SUBQ $4*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y6 + VPERM2I128 $0x02, Y12, Y4, Y10 + VPERM2I128 $0x13, Y0, Y14, Y8 + VPERM2I128 $0x13, Y12, Y4, Y2 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR (SI), Y6, Y6 + VPXOR 32(SI), Y10, Y10 + 
VPXOR 64(SI), Y8, Y8 + VPXOR 96(SI), Y2, Y2 + VMOVDQU Y6, (DI) + VMOVDQU Y10, 32(DI) + VMOVDQU Y8, 64(DI) + VMOVDQU Y2, 96(DI) + LEAQ 128(SI), SI + LEAQ 128(DI), DI + SUBQ $0x80, BX + JMP openAVX2TailLoop + openAVX2Tail384: // Need to decrypt up to 384 bytes - prepare six blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, ctr0StoreAVX2 - VMOVDQA DD1, ctr1StoreAVX2 - VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) // Compute the number of iterations that will hash two blocks of data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $256, itr1 - SHRQ $4, itr1 - ADDQ $6, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - - // Perform ChaCha rounds, while hashing the remaining input + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x00000100, CX + SHRQ $0x04, CX + ADDQ $0x06, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + openAVX2Tail384LoopB: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX openAVX2Tail384LoopA: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - - CMPQ itr2, itr1 - JB openAVX2Tail384LoopB - - CMPQ itr2, $10 - JNE openAVX2Tail384LoopA - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + CMPQ R9, CX + JB openAVX2Tail384LoopB + CMPQ R9, $0x0a + JNE openAVX2Tail384LoopA + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX openAVX2Tail384Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail384HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail384Hash - -// Store 256 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail384HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, 
R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail384Hash + openAVX2Tail384HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - LEAQ (8*32)(inp), inp - LEAQ (8*32)(oup), oup - SUBQ $8*32, inl + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openAVX2TailLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext openAVX2Tail512: - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, 
CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - MOVQ inp, itr2 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + MOVQ SI, R9 openAVX2Tail512LoopB: - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ (2*8)(itr2), itr2 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 openAVX2Tail512LoopA: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(itr2)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; 
VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(itr2)) - polyMulAVX2 - LEAQ (4*8)(itr2), itr2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - INCQ itr1 - CMPQ itr1, $4 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(R9), R10 + ADCQ 24(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(R9), R9 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + INCQ CX + CMPQ CX, $0x04 JLT openAVX2Tail512LoopB 
- - CMPQ itr1, $10 - JNE openAVX2Tail512LoopA - - MOVQ inl, itr1 - SUBQ $384, itr1 - ANDQ $-16, itr1 + CMPQ CX, $0x0a + JNE openAVX2Tail512LoopA + MOVQ BX, CX + SUBQ $0x00000180, CX + ANDQ $-16, CX openAVX2Tail512HashLoop: - TESTQ itr1, itr1 + TESTQ CX, CX JE openAVX2Tail512HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - SUBQ $16, itr1 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + SUBQ $0x10, CX JMP openAVX2Tail512HashLoop openAVX2Tail512HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - LEAQ (12*32)(inp), inp - LEAQ (12*32)(oup), oup - SUBQ $12*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Seal(dst, key, src, ad []byte) -TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 - // For aligned stack access + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), 
Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + LEAQ 384(SI), SI + LEAQ 384(DI), DI + SUBQ $0x00000180, BX + JMP openAVX2TailLoop + +DATA ·chacha20Constants<>+0(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+4(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+8(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+12(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+16(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+20(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+24(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+28(SB)/4, $0x6b206574 +GLOBL ·chacha20Constants<>(SB), RODATA|NOPTR, $32 + +DATA ·polyClampMask<>+0(SB)/8, $0x0ffffffc0fffffff +DATA ·polyClampMask<>+8(SB)/8, $0x0ffffffc0ffffffc +DATA ·polyClampMask<>+16(SB)/8, $0xffffffffffffffff +DATA ·polyClampMask<>+24(SB)/8, $0xffffffffffffffff +GLOBL ·polyClampMask<>(SB), RODATA|NOPTR, $32 + +DATA ·sseIncMask<>+0(SB)/8, $0x0000000000000001 +DATA ·sseIncMask<>+8(SB)/8, $0x0000000000000000 +GLOBL ·sseIncMask<>(SB), RODATA|NOPTR, $16 + +DATA ·andMask<>+0(SB)/8, $0x00000000000000ff +DATA ·andMask<>+8(SB)/8, $0x0000000000000000 +DATA ·andMask<>+16(SB)/8, $0x000000000000ffff +DATA ·andMask<>+24(SB)/8, $0x0000000000000000 +DATA ·andMask<>+32(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+40(SB)/8, $0x0000000000000000 +DATA ·andMask<>+48(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+56(SB)/8, $0x0000000000000000 +DATA ·andMask<>+64(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+72(SB)/8, $0x0000000000000000 +DATA ·andMask<>+80(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+88(SB)/8, $0x0000000000000000 +DATA ·andMask<>+96(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+104(SB)/8, $0x0000000000000000 +DATA ·andMask<>+112(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+120(SB)/8, $0x0000000000000000 +DATA ·andMask<>+128(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+136(SB)/8, $0x00000000000000ff +DATA ·andMask<>+144(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+152(SB)/8, $0x000000000000ffff +DATA ·andMask<>+160(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+168(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+176(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+184(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+192(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+200(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+208(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+216(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+224(SB)/8, 
$0xffffffffffffffff +DATA ·andMask<>+232(SB)/8, $0x00ffffffffffffff +GLOBL ·andMask<>(SB), RODATA|NOPTR, $240 + +DATA ·avx2InitMask<>+0(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+16(SB)/8, $0x0000000000000001 +DATA ·avx2InitMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2InitMask<>(SB), RODATA|NOPTR, $32 + +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0d0c0f0e09080b0a +DATA ·rol16<>+16(SB)/8, $0x0504070601000302 +DATA ·rol16<>+24(SB)/8, $0x0d0c0f0e09080b0a +GLOBL ·rol16<>(SB), RODATA|NOPTR, $32 + +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0e0d0c0f0a09080b +DATA ·rol8<>+16(SB)/8, $0x0605040702010003 +DATA ·rol8<>+24(SB)/8, $0x0e0d0c0f0a09080b +GLOBL ·rol8<>(SB), RODATA|NOPTR, $32 + +DATA ·avx2IncMask<>+0(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2IncMask<>+16(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2IncMask<>(SB), RODATA|NOPTR, $32 + +// func chacha20Poly1305Seal(dst []byte, key []uint32, src []byte, ad []byte) +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Seal(SB), $288-96 MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - CMPB ·useAVX2(SB), $1 + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Seal_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE sealSSE128 // About 15% faster + CMPQ BX, $0x80 + JBE sealSSE128 // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store + MOVO X3, 32(BP) + MOVO X6, 48(BP) // Load state, increment counter blocks - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - MOVQ $10, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + MOVQ $0x0000000a, R9 sealSSEIntroLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Right; shiftB1Right; 
shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JNE sealSSEIntroLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + 
PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JNE sealSSEIntroLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore - MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) - - MOVQ $128, itr1 - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 - - CMPQ inl, $64 - JBE sealSSE128SealHash - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) - - ADDQ $64, itr1 - SUBQ $64, inl - LEAQ 64(inp), inp - - MOVQ $2, itr1 - MOVQ $8, itr2 - - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - CMPQ inl, $192 - JBE sealSSETail192 + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), 
X6 + MOVOU 112(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 64(DI) + MOVOU X5, 80(DI) + MOVOU X8, 96(DI) + MOVOU X11, 112(DI) + MOVQ $0x00000080, CX + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 128(DI) + MOVOU X13, 144(DI) + MOVOU X14, 160(DI) + MOVOU X15, 176(DI) + ADDQ $0x40, CX + SUBQ $0x40, BX + LEAQ 64(SI), SI + MOVQ $0x00000002, CX + MOVQ $0x00000008, R9 + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + CMPQ BX, $0xc0 + JBE sealSSETail192 sealSSEMainLoop: // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) sealSSEInnerLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(oup)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(oup), oup - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JGE sealSSEInnerLoop - polyAdd(0(oup)) - polyMul - LEAQ (2*8)(oup), oup - DECQ itr1 - JG sealSSEInnerLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + 
MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(DI), DI + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + 
BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JGE sealSSEInnerLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSEInnerLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVO tmpStore, D3 - - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - ADDQ $192, inp - MOVQ $192, itr1 - SUBQ $192, inl - MOVO A3, A1 - MOVO B3, B1 - MOVO C3, C1 - MOVO D3, D1 - CMPQ inl, $64 + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVO 64(BP), X15 + MOVOU 64(SI), 
X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + ADDQ $0xc0, SI + MOVQ $0x000000c0, CX + SUBQ $0xc0, BX + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 JBE sealSSE128SealHash - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) - LEAQ 64(inp), inp - SUBQ $64, inl - MOVQ $6, itr1 - MOVQ $4, itr2 - CMPQ inl, $192 + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + LEAQ 64(SI), SI + SUBQ $0x40, BX + MOVQ $0x00000006, CX + MOVQ $0x00000004, R9 + CMPQ BX, $0xc0 JG sealSSEMainLoop - - MOVQ inl, itr1 - TESTQ inl, inl + MOVQ BX, CX + TESTQ BX, BX JE sealSSE128SealHash - MOVQ $6, itr1 - CMPQ inl, $64 + MOVQ $0x00000006, CX + CMPQ BX, $0x40 JBE sealSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE sealSSETail128 JMP sealSSETail192 -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of plaintext sealSSETail64: - // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A1 - MOVO state1Store, B1 - MOVO state2Store, C1 - MOVO ctr3Store, D1 - PADDL ·sseIncMask<>(SB), D1 - MOVO D1, ctr0Store + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) sealSSETail64LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail64LoopB: - chachaQR(A1, B1, C1, D1, T1) - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A1, B1, C1, D1, T1) - shiftB1Right; shiftC1Right; shiftD1Right - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - DECQ itr1 - JG sealSSETail64LoopA - - DECQ itr2 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 
+ BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSETail64LoopA + DECQ R9 JGE sealSSETail64LoopB - PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B1 - PADDL state2Store, C1 - PADDL ctr0Store, D1 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X4 + PADDL 48(BP), X7 + PADDL 80(BP), X10 + JMP sealSSE128Seal - JMP sealSSE128Seal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of plaintext sealSSETail128: - // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) sealSSETail128LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail128LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - DECQ itr1 - JG sealSSETail128LoopA - - DECQ itr2 
- JGE sealSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr0Store, D0; PADDL ctr1Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - - MOVQ $64, itr1 - LEAQ 64(inp), inp - SUBQ $64, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of plaintext + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + DECQ CX + JG sealSSETail128LoopA + DECQ R9 + JGE 
sealSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVQ $0x00000040, CX + LEAQ 64(SI), SI + SUBQ $0x40, BX + JMP sealSSE128SealHash + sealSSETail192: - // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 112(BP) sealSSETail192LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail192LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - DECQ itr1 - JG sealSSETail192LoopA - - DECQ itr2 - JGE sealSSETail192LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - MOVO A2, A1 - MOVO B2, B1 - MOVO C2, 
C1 - MOVO D2, D1 - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special seal optimization for buffers smaller than 129 bytes + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE 
$0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ CX + JG sealSSETail192LoopA + DECQ R9 + JGE sealSSETail192LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + PADDL 112(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + JMP sealSSE128SealHash + sealSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 sealSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE sealSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, 
X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE sealSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore - MOVOU B0, sStore + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealSSE128SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealSSE128Seal - polyAdd(0(oup)) - polyMul - - SUBQ $16, itr1 - ADDQ $16, 
oup - - JMP sealSSE128SealHash + CMPQ CX, $0x10 + JB sealSSE128Seal + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealSSE128SealHash sealSSE128Seal: - CMPQ inl, $16 + CMPQ BX, $0x10 JB sealSSETail - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - MOVOU (inp), T0 - PXOR T0, A1 - MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI // Extract for hashing - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP sealSSE128Seal sealSSETail: - TESTQ inl, inl + TESTQ BX, BX JE sealSSEFinalize // We can only load the PT one byte at a time to avoid read after end of buffer - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVQ inl, itr1 - LEAQ -1(inp)(inl*1), inp - XORQ t2, t2 - XORQ t3, t3 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVQ BX, CX + LEAQ -1(SI)(BX*1), SI + XORQ R15, R15 + XORQ R8, R8 XORQ AX, AX sealSSETailLoadLoop: - SHLQ $8, t2, t3 - SHLQ $8, t2 - MOVB (inp), AX - XORQ AX, t2 - LEAQ -1(inp), inp - DECQ itr1 + SHLQ $0x08, R15, R8 + SHLQ $0x08, R15 + MOVB (SI), AX + XORQ AX, R15 + LEAQ -1(SI), SI + DECQ CX JNE sealSSETailLoadLoop - MOVQ t2, 0+tmpStore - MOVQ t3, 8+tmpStore - PXOR 0+tmpStore, A1 - MOVOU A1, (oup) - MOVOU -16(t0)(itr2*1), T0 - PAND T0, A1 - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - ADDQ inl, oup + MOVQ R15, 64(BP) + MOVQ R8, 72(BP) + PXOR 64(BP), X1 + MOVOU X1, (DI) + MOVOU -16(R13)(R9*1), X12 + PAND X12, X1 + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, 
R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ BX, DI sealSSEFinalize: // Hash in the buffer lengths - ADDQ ad_len+80(FP), acc0 - ADCQ src_len+56(FP), acc1 - ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally store the tag at the end of the message - MOVQ acc0, (0*8)(oup) - MOVQ acc1, (1*8)(oup) + MOVQ R10, (DI) + MOVQ R11, 8(DI) RET -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- chacha20Poly1305Seal_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimizations, for very short buffers - CMPQ inl, $192 - JBE seal192AVX2 // 33% faster - CMPQ inl, $320 - JBE seal320AVX2 // 17% faster + CMPQ BX, $0x000000c0 + JBE seal192AVX2 + CMPQ BX, $0x00000140 + JBE seal320AVX2 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 - VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + 
VMOVDQA Y12, 64(BP) + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, 96(BP) + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y1, 128(BP) + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, R9 sealAVX2IntroLoop: - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr2 - JNE sealAVX2IntroLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - - VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 - VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key - VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD 
Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ R9 + JNE sealAVX2IntroLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPERM2I128 $0x02, Y0, Y14, Y4 + VPERM2I128 $0x13, Y0, Y14, Y0 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), DD0, DD0 - VMOVDQA DD0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, (BP) // Hash AD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) // Can store at least 320 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR 
(1*32)(inp), CC0, CC0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU CC0, (1*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 - VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 - VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) - - MOVQ $320, itr1 - SUBQ $320, inl - LEAQ 320(inp), inp - - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 - CMPQ inl, $128 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y12, Y12 + VMOVDQU Y0, (DI) + VMOVDQU Y12, 32(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 64(SI), Y0, Y0 + VPXOR 96(SI), Y14, Y14 + VPXOR 128(SI), Y12, Y12 + VPXOR 160(SI), Y4, Y4 + VMOVDQU Y0, 64(DI) + VMOVDQU Y14, 96(DI) + VMOVDQU Y12, 128(DI) + VMOVDQU Y4, 160(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 192(SI), Y0, Y0 + VPXOR 224(SI), Y14, Y14 + VPXOR 256(SI), Y12, Y12 + VPXOR 288(SI), Y4, Y4 + VMOVDQU Y0, 192(DI) + VMOVDQU Y14, 224(DI) + VMOVDQU Y12, 256(DI) + VMOVDQU Y4, 288(DI) + MOVQ $0x00000140, CX + SUBQ $0x00000140, BX + LEAQ 320(SI), SI + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, Y15, Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, Y15, Y3, Y4 + CMPQ BX, $0x80 JBE sealAVX2SealHash - - VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 - VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVQ $8, itr1 - MOVQ $2, itr2 - - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - CMPQ inl, $512 - JBE sealAVX2Tail512 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VPXOR 64(SI), Y12, Y12 + VPXOR 96(SI), Y4, Y4 + VMOVDQU Y0, 320(DI) + VMOVDQU Y14, 352(DI) + VMOVDQU Y12, 384(DI) + VMOVDQU Y4, 416(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVQ $0x00000008, CX + MOVQ $0x00000002, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + CMPQ BX, $0x00000200 + JBE sealAVX2Tail512 // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - - VMOVDQA CC3, tmpStoreAVX2 - 
chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - - SUBQ $16, oup // Adjust the pointer - MOVQ $9, itr1 - JMP sealAVX2InternalLoopStart + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + 
VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR 
Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + SUBQ $0x10, DI + MOVQ $0x00000009, CX + JMP sealAVX2InternalLoopStart sealAVX2MainLoop: - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, CX sealAVX2InternalLoop: - polyAdd(0*8(oup)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + 
MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 sealAVX2InternalLoopStart: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(oup)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(oup)) - LEAQ (6*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr1 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 
+ VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(DI), R10 + ADCQ 40(DI), R11 + ADCQ $0x01, R12 + LEAQ 48(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + 
VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX JNE sealAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 
64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(-2*8(oup)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - SUBQ $(32*16), inl - CMPQ inl, $512 + ADDQ -16(DI), R10 + ADCQ -8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + SUBQ $0x00000200, BX + CMPQ BX, $0x00000200 JG sealAVX2MainLoop // Tail can only hash 480 bytes - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ 32(oup), oup - - MOVQ $10, itr1 - MOVQ $0, itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 
24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + MOVQ $0x0000000a, CX + MOVQ $0x00000000, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR 
$0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 sealAVX2ShortSeal: // Hash aad - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealAVX2SealHash: // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash + CMPQ CX, $0x10 + JB sealAVX2ShortSealLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealAVX2SealHash sealAVX2ShortSealLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB sealAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + 
MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP sealAVX2ShortSealLoop sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB sealAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 sealAVX2ShortDone: VZEROUPPER JMP sealSSETail -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 
+ VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - 
VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP sealAVX2ShortSeal -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext sealAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA 32(BP), Y14 + VMOVDQA 64(BP), Y12 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, Y1 sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + 
ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ CX + JG sealAVX2Tail128LoopA + DECQ R9 + JGE sealAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y5 + VPADDD 32(BP), Y14, Y9 + VPADDD 64(BP), Y12, Y13 + VPADDD Y1, Y4, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 JMP sealAVX2ShortSealLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext sealAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ 
R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + 
VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ CX + JG sealAVX2Tail256LoopA + DECQ R9 + JGE sealAVX2Tail256LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2SealHash + sealAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + VMOVDQA Y2, Y15 sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 
+ ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + 
VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ CX + JG sealAVX2Tail384LoopA + DECQ R9 + JGE sealAVX2Tail384LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 
32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPADDD Y15, Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + MOVQ $0x00000100, CX + LEAQ 256(SI), SI + SUBQ $0x00000100, BX + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + JMP sealAVX2SealHash + sealAVX2Tail512: - // Need to decrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR 
CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG 
sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JG sealAVX2Tail512LoopA + DECQ R9 + JGE 
sealAVX2Tail512LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPXOR (SI), Y15, Y15 + VMOVDQU Y15, (DI) + VPERM2I128 $0x02, Y12, Y4, Y15 + VPXOR 32(SI), Y15, Y15 + VMOVDQU Y15, 32(DI) + VPERM2I128 $0x13, Y0, Y14, Y15 + VPXOR 64(SI), Y15, Y15 + VMOVDQU Y15, 64(DI) + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + MOVQ $0x00000180, CX + LEAQ 384(SI), SI + SUBQ $0x00000180, BX + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c6475..133757384 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. 
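For orientation while reading the regenerated seal tail code above: the old source expressed each double round through the chachaQR_AVX2 macro, with polyAdd/polyMul calls in between, while the new output spells out the same add, xor, rotate pattern instruction by instruction (VPADDD, VPXOR, VPSHUFB against the rol16/rol8 tables, and VPSLLD/VPSRLD pairs for the 12-bit and 7-bit rotations). A minimal scalar sketch of one ChaCha20 quarter round, offered only as a reading aid and not as the vendored implementation:

```go
package sketch

import "math/bits"

// quarterRound applies the ChaCha20 add-xor-rotate sequence to four state
// words. The AVX2 code above runs the same sequence on whole rows of the
// state at once, using byte-shuffle tables for the 16- and 8-bit rotations
// and shift/xor pairs for the 12- and 7-bit ones.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d ^= a
	d = bits.RotateLeft32(d, 16)
	c += d
	b ^= c
	b = bits.RotateLeft32(b, 12)
	a += b
	d ^= a
	d = bits.RotateLeft32(d, 8)
	c += d
	b ^= c
	b = bits.RotateLeft32(b, 7)
	return a, b, c, d
}
```

The interleaved ADDQ/ADCQ/MULQ blocks between the vector instructions are the Poly1305 accumulator updates that the removed polyAdd and polyMul macros used to generate.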
//go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s index fcce0234b..3883e0ec2 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -1,880 +1,880 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run salsa20_amd64_asm.go -out ../salsa20_amd64.s -pkg salsa. DO NOT EDIT. 
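The sum_amd64.s rewrite just above replaces the POLY1305_ADD and POLY1305_MUL macros with their expanded bodies, but the arithmetic is unchanged: each 16-byte block, with an extra high bit (the ADCQ $0x01), is added into the 130-bit accumulator, the accumulator is multiplied by r, and the product is folded back with the ANDQ $-4 / SHRQ $0x02 sequence, which relies on 2^130 being congruent to 5 modulo 2^130 - 5. A big-integer reference for one pass through the assembly's "loop" label, meant as a sketch for checking the limb code rather than a drop-in replacement:

```go
package sketch

import "math/big"

// poly1305P is 2^130 - 5, the Poly1305 prime.
var poly1305P = new(big.Int).Sub(
	new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

// updateBlock folds one full 16-byte block into the accumulator h and
// multiplies by the clamped key part r, reducing modulo 2^130 - 5. The
// assembly keeps h only partially reduced between blocks; the value is the
// same modulo the prime.
func updateBlock(h, r *big.Int, block [16]byte) *big.Int {
	// Read the block as a little-endian integer and set bit 128, matching
	// the implicit 0x01 byte the assembly adds with ADCQ $0x01.
	n := new(big.Int)
	for i := len(block) - 1; i >= 0; i-- {
		n.Lsh(n, 8)
		n.Or(n, big.NewInt(int64(block[i])))
	}
	n.SetBit(n, 128, 1)
	h.Add(h, n)
	h.Mul(h, r)
	return h.Mod(h, poly1305P)
}
```

The addition of s and the truncation of the final tag to 128 bits are not part of update; they happen elsewhere in the package, outside this file.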
//go:build amd64 && !purego && gc -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html +// func salsa2020XORKeyStream(out *byte, in *byte, n uint64, nonce *byte, key *byte) +// Requires: SSE2 +TEXT ·salsa2020XORKeyStream(SB), $456-40 + // This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + MOVQ n+16(FP), DX + MOVQ nonce+24(FP), CX + MOVQ key+32(FP), R8 + MOVQ SP, R12 + ADDQ $0x1f, R12 + ANDQ $-32, R12 + MOVQ DX, R9 + MOVQ CX, DX + MOVQ R8, R10 + CMPQ R9, $0x00 + JBE DONE + MOVL 20(R10), CX + MOVL (R10), R8 + MOVL (DX), AX + MOVL 16(R10), R11 + MOVL CX, (R12) + MOVL R8, 4(R12) + MOVL AX, 8(R12) + MOVL R11, 12(R12) + MOVL 8(DX), CX + MOVL 24(R10), R8 + MOVL 4(R10), AX + MOVL 4(DX), R11 + MOVL CX, 16(R12) + MOVL R8, 20(R12) + MOVL AX, 24(R12) + MOVL R11, 28(R12) + MOVL 12(DX), CX + MOVL 12(R10), DX + MOVL 28(R10), R8 + MOVL 8(R10), AX + MOVL DX, 32(R12) + MOVL CX, 36(R12) + MOVL R8, 40(R12) + MOVL AX, 44(R12) + MOVQ $0x61707865, DX + MOVQ $0x3320646e, CX + MOVQ $0x79622d32, R8 + MOVQ $0x6b206574, AX + MOVL DX, 48(R12) + MOVL CX, 52(R12) + MOVL R8, 56(R12) + MOVL AX, 60(R12) + CMPQ R9, $0x00000100 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12), X0 + PSHUFL $0x55, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X3 + PSHUFL $0x00, X0, X0 + MOVOA X1, 64(R12) + MOVOA X2, 80(R12) + MOVOA X3, 96(R12) + MOVOA X0, 112(R12) + MOVOA (R12), X0 + PSHUFL $0xaa, X0, X1 + PSHUFL $0xff, X0, X2 + PSHUFL $0x00, X0, X3 + PSHUFL $0x55, X0, X0 + MOVOA X1, 128(R12) + MOVOA X2, 144(R12) + MOVOA X3, 160(R12) + MOVOA X0, 176(R12) + MOVOA 16(R12), X0 + PSHUFL $0xff, X0, X1 + PSHUFL $0x55, X0, X2 + PSHUFL $0xaa, X0, X0 + MOVOA X1, 192(R12) + MOVOA X2, 208(R12) + MOVOA X0, 224(R12) + MOVOA 32(R12), X0 + PSHUFL $0x00, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X0 + MOVOA X1, 240(R12) + MOVOA X2, 256(R12) + MOVOA X0, 272(R12) -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. 
-TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 +BYTESATLEAST256: + MOVL 16(R12), DX + MOVL 36(R12), CX + MOVL DX, 288(R12) + MOVL CX, 304(R12) + SHLQ $0x20, CX + ADDQ CX, DX + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 292(R12) + MOVL CX, 308(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 296(R12) + MOVL CX, 312(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 300(R12) + MOVL CX, 316(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 16(R12) + MOVL CX, 36(R12) + MOVQ R9, 352(R12) + MOVQ $0x00000014, DX + MOVOA 64(R12), X0 + MOVOA 80(R12), X1 + MOVOA 96(R12), X2 + MOVOA 256(R12), X3 + MOVOA 272(R12), X4 + MOVOA 128(R12), X5 + MOVOA 144(R12), X6 + MOVOA 176(R12), X7 + MOVOA 192(R12), X8 + MOVOA 208(R12), X9 + MOVOA 224(R12), X10 + MOVOA 304(R12), X11 + MOVOA 112(R12), X12 + MOVOA 160(R12), X13 + MOVOA 240(R12), X14 + MOVOA 288(R12), X15 - MOVQ SP,R12 - ADDQ $31, R12 - ANDQ $~31, R12 +MAINLOOP1: + MOVOA X1, 320(R12) + MOVOA X2, 336(R12) + MOVOA X13, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X14 + PSRLL $0x19, X2 + PXOR X2, X14 + MOVOA X7, X1 + PADDL X0, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X11 + PSRLL $0x19, X2 + PXOR X2, X11 + MOVOA X12, X1 + PADDL X14, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X15 + PSRLL $0x17, X2 + PXOR X2, X15 + MOVOA X0, X1 + PADDL X11, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X9 + PSRLL $0x17, X2 + PXOR X2, X9 + MOVOA X14, X1 + PADDL X15, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X13 + PSRLL $0x13, X2 + PXOR X2, X13 + MOVOA X11, X1 + PADDL X9, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X7 + PSRLL $0x13, X2 + PXOR X2, X7 + MOVOA X15, X1 + PADDL X13, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA 320(R12), X1 + MOVOA X12, 320(R12) + MOVOA X9, X2 + PADDL X7, X2 + MOVOA X2, X12 + PSLLL $0x12, X2 + PXOR X2, X0 + PSRLL $0x0e, X12 + PXOR X12, X0 + MOVOA X5, X2 + PADDL X1, X2 + MOVOA X2, X12 + PSLLL $0x07, X2 + PXOR X2, X3 + PSRLL $0x19, X12 + PXOR X12, X3 + MOVOA 336(R12), X2 + MOVOA X0, 336(R12) + MOVOA X6, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X4 + PSRLL $0x19, X12 + PXOR X12, X4 + MOVOA X1, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X10 + PSRLL $0x17, X12 + PXOR X12, X10 + MOVOA X2, X0 + PADDL X4, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X8 + PSRLL $0x17, X12 + PXOR X12, X8 + MOVOA X3, X0 + PADDL X10, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X5 + PSRLL $0x13, X12 + PXOR X12, X5 + MOVOA X4, X0 + PADDL X8, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X6 + PSRLL $0x13, X12 + PXOR X12, X6 + MOVOA X10, X0 + PADDL X5, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA 320(R12), X0 + MOVOA X1, 320(R12) + MOVOA X4, X1 + PADDL X0, X1 + MOVOA X1, X12 + PSLLL $0x07, X1 + PXOR X1, X7 + PSRLL $0x19, X12 + PXOR X12, X7 + MOVOA X8, X1 + PADDL X6, X1 + MOVOA X1, X12 + PSLLL $0x12, X1 + PXOR X1, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 336(R12), X12 + MOVOA X2, 336(R12) + MOVOA X14, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X5 + PSRLL $0x19, X2 + PXOR X2, X5 + MOVOA X0, X1 + PADDL X7, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X10 + PSRLL $0x17, X2 + PXOR X2, X10 + MOVOA X12, X1 + PADDL X5, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X8 + PSRLL $0x17, X2 + PXOR X2, X8 + 
MOVOA X7, X1 + PADDL X10, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X4 + PSRLL $0x13, X2 + PXOR X2, X4 + MOVOA X5, X1 + PADDL X8, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X14 + PSRLL $0x13, X2 + PXOR X2, X14 + MOVOA X10, X1 + PADDL X4, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X0 + PSRLL $0x0e, X2 + PXOR X2, X0 + MOVOA 320(R12), X1 + MOVOA X0, 320(R12) + MOVOA X8, X0 + PADDL X14, X0 + MOVOA X0, X2 + PSLLL $0x12, X0 + PXOR X0, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA X11, X0 + PADDL X1, X0 + MOVOA X0, X2 + PSLLL $0x07, X0 + PXOR X0, X6 + PSRLL $0x19, X2 + PXOR X2, X6 + MOVOA 336(R12), X2 + MOVOA X12, 336(R12) + MOVOA X3, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X13 + PSRLL $0x19, X12 + PXOR X12, X13 + MOVOA X1, X0 + PADDL X6, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X15 + PSRLL $0x17, X12 + PXOR X12, X15 + MOVOA X2, X0 + PADDL X13, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X9 + PSRLL $0x17, X12 + PXOR X12, X9 + MOVOA X6, X0 + PADDL X15, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X11 + PSRLL $0x13, X12 + PXOR X12, X11 + MOVOA X13, X0 + PADDL X9, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X3 + PSRLL $0x13, X12 + PXOR X12, X3 + MOVOA X15, X0 + PADDL X11, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA X9, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 320(R12), X12 + MOVOA 336(R12), X0 + SUBQ $0x02, DX + JA MAINLOOP1 + PADDL 112(R12), X12 + PADDL 176(R12), X7 + PADDL 224(R12), X10 + PADDL 272(R12), X4 + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL (SI), DX + XORL 4(SI), CX + XORL 8(SI), R8 + XORL 12(SI), R9 + MOVL DX, (DI) + MOVL CX, 4(DI) + MOVL R8, 8(DI) + MOVL R9, 12(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 64(SI), DX + XORL 68(SI), CX + XORL 72(SI), R8 + XORL 76(SI), R9 + MOVL DX, 64(DI) + MOVL CX, 68(DI) + MOVL R8, 72(DI) + MOVL R9, 76(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 128(SI), DX + XORL 132(SI), CX + XORL 136(SI), R8 + XORL 140(SI), R9 + MOVL DX, 128(DI) + MOVL CX, 132(DI) + MOVL R8, 136(DI) + MOVL R9, 140(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + XORL 192(SI), DX + XORL 196(SI), CX + XORL 200(SI), R8 + XORL 204(SI), R9 + MOVL DX, 192(DI) + MOVL CX, 196(DI) + MOVL R8, 200(DI) + MOVL R9, 204(DI) + PADDL 240(R12), X14 + PADDL 64(R12), X0 + PADDL 128(R12), X5 + PADDL 192(R12), X8 + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 16(SI), DX + XORL 20(SI), CX + XORL 24(SI), R8 + XORL 28(SI), R9 + MOVL DX, 16(DI) + MOVL CX, 20(DI) + MOVL R8, 24(DI) + MOVL R9, 28(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 80(SI), DX + XORL 84(SI), CX + XORL 88(SI), R8 + XORL 92(SI), R9 + MOVL DX, 80(DI) + MOVL CX, 84(DI) + MOVL R8, 88(DI) + MOVL R9, 92(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 144(SI), DX + XORL 
148(SI), CX + XORL 152(SI), R8 + XORL 156(SI), R9 + MOVL DX, 144(DI) + MOVL CX, 148(DI) + MOVL R8, 152(DI) + MOVL R9, 156(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + XORL 208(SI), DX + XORL 212(SI), CX + XORL 216(SI), R8 + XORL 220(SI), R9 + MOVL DX, 208(DI) + MOVL CX, 212(DI) + MOVL R8, 216(DI) + MOVL R9, 220(DI) + PADDL 288(R12), X15 + PADDL 304(R12), X11 + PADDL 80(R12), X1 + PADDL 144(R12), X6 + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 32(SI), DX + XORL 36(SI), CX + XORL 40(SI), R8 + XORL 44(SI), R9 + MOVL DX, 32(DI) + MOVL CX, 36(DI) + MOVL R8, 40(DI) + MOVL R9, 44(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 96(SI), DX + XORL 100(SI), CX + XORL 104(SI), R8 + XORL 108(SI), R9 + MOVL DX, 96(DI) + MOVL CX, 100(DI) + MOVL R8, 104(DI) + MOVL R9, 108(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 160(SI), DX + XORL 164(SI), CX + XORL 168(SI), R8 + XORL 172(SI), R9 + MOVL DX, 160(DI) + MOVL CX, 164(DI) + MOVL R8, 168(DI) + MOVL R9, 172(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + XORL 224(SI), DX + XORL 228(SI), CX + XORL 232(SI), R8 + XORL 236(SI), R9 + MOVL DX, 224(DI) + MOVL CX, 228(DI) + MOVL R8, 232(DI) + MOVL R9, 236(DI) + PADDL 160(R12), X13 + PADDL 208(R12), X9 + PADDL 256(R12), X3 + PADDL 96(R12), X2 + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 48(SI), DX + XORL 52(SI), CX + XORL 56(SI), R8 + XORL 60(SI), R9 + MOVL DX, 48(DI) + MOVL CX, 52(DI) + MOVL R8, 56(DI) + MOVL R9, 60(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 112(SI), DX + XORL 116(SI), CX + XORL 120(SI), R8 + XORL 124(SI), R9 + MOVL DX, 112(DI) + MOVL CX, 116(DI) + MOVL R8, 120(DI) + MOVL R9, 124(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 176(SI), DX + XORL 180(SI), CX + XORL 184(SI), R8 + XORL 188(SI), R9 + MOVL DX, 176(DI) + MOVL CX, 180(DI) + MOVL R8, 184(DI) + MOVL R9, 188(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + XORL 240(SI), DX + XORL 244(SI), CX + XORL 248(SI), R8 + XORL 252(SI), R9 + MOVL DX, 240(DI) + MOVL CX, 244(DI) + MOVL R8, 248(DI) + MOVL R9, 252(DI) + MOVQ 352(R12), R9 + SUBQ $0x00000100, R9 + ADDQ $0x00000100, SI + ADDQ $0x00000100, DI + CMPQ R9, $0x00000100 + JAE BYTESATLEAST256 + CMPQ R9, $0x00 + JBE DONE - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(R12) - MOVL R8, 4 (R12) - MOVL AX, 8 (R12) - MOVL R11, 12 (R12) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(R12) - MOVL R8, 20 (R12) - MOVL AX, 24 (R12) - MOVL R11, 28 (R12) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(R12) - MOVL CX, 36 (R12) - MOVL R8, 40 (R12) - MOVL AX, 44 (R12) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(R12) - MOVL CX, 52 (R12) - MOVL R8, 56 (R12) - 
MOVL AX, 60 (R12) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(R12),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(R12) - MOVOA X2,80(R12) - MOVOA X3,96(R12) - MOVOA X0,112(R12) - MOVOA 0(R12),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(R12) - MOVOA X2,144(R12) - MOVOA X3,160(R12) - MOVOA X0,176(R12) - MOVOA 16(R12),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(R12) - MOVOA X2,208(R12) - MOVOA X0,224(R12) - MOVOA 32(R12),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X0 - MOVOA X1,240(R12) - MOVOA X2,256(R12) - MOVOA X0,272(R12) - BYTESATLEAST256: - MOVL 16(R12),DX - MOVL 36 (R12),CX - MOVL DX,288(R12) - MOVL CX,304(R12) - SHLQ $32,CX - ADDQ CX,DX - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (R12) - MOVL CX, 308 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (R12) - MOVL CX, 312 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (R12) - MOVL CX, 316 (R12) - ADDQ $1,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX,16(R12) - MOVL CX, 36 (R12) - MOVQ R9,352(R12) - MOVQ $20,DX - MOVOA 64(R12),X0 - MOVOA 80(R12),X1 - MOVOA 96(R12),X2 - MOVOA 256(R12),X3 - MOVOA 272(R12),X4 - MOVOA 128(R12),X5 - MOVOA 144(R12),X6 - MOVOA 176(R12),X7 - MOVOA 192(R12),X8 - MOVOA 208(R12),X9 - MOVOA 224(R12),X10 - MOVOA 304(R12),X11 - MOVOA 112(R12),X12 - MOVOA 160(R12),X13 - MOVOA 240(R12),X14 - MOVOA 288(R12),X15 - MAINLOOP1: - MOVOA X1,320(R12) - MOVOA X2,336(R12) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(R12),X1 - MOVOA X12,320(R12) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(R12),X2 - MOVOA X0,336(R12) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(R12),X0 - MOVOA X1,320(R12) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(R12),X12 - MOVOA X2,336(R12) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL 
X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(R12),X1 - MOVOA X0,320(R12) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(R12),X2 - MOVOA X12,336(R12) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(R12),X12 - MOVOA 336(R12),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 112(R12),X12 - PADDL 176(R12),X7 - PADDL 224(R12),X10 - PADDL 272(R12),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 - MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(R12),X14 - PADDL 64(R12),X0 - PADDL 128(R12),X5 - PADDL 192(R12),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL R8,152(DI) - MOVL 
R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(R12),X15 - PADDL 304(R12),X11 - PADDL 80(R12),X1 - PADDL 144(R12),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(R12),X13 - PADDL 208(R12),X9 - PADDL 256(R12),X3 - PADDL 96(R12),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 352(R12),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 360(R12),DI - MOVQ R9,CX +BYTESBETWEEN1AND255: + CMPQ R9, $0x40 + JAE NOCOPY + MOVQ DI, DX + LEAQ 360(R12), DI + MOVQ R9, CX REP; MOVSB - LEAQ 360(R12),DI - LEAQ 360(R12),SI - NOCOPY: - MOVQ R9,352(R12) - MOVOA 48(R12),X0 - MOVOA 0(R12),X1 - MOVOA 16(R12),X2 - MOVOA 32(R12),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA 
X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(R12),X0 - PADDL 0(R12),X1 - PADDL 16(R12),X2 - PADDL 32(R12),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 352(R12),R9 - MOVL 16(R12),CX - MOVL 36 (R12),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(R12) - MOVL R8, 36 (R12) - CMPQ R9,$64 - JA BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX + LEAQ 360(R12), DI + LEAQ 360(R12), SI + +NOCOPY: + MOVQ R9, 352(R12) + MOVOA 48(R12), X0 + MOVOA (R12), X1 + MOVOA 16(R12), X2 + MOVOA 32(R12), X3 + MOVOA X1, X4 + MOVQ $0x00000014, CX + +MAINLOOP2: + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, 
X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + SUBQ $0x04, CX + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PXOR X7, X7 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + JA MAINLOOP2 + PADDL 48(R12), X0 + PADDL (R12), X1 + PADDL 16(R12), X2 + PADDL 32(R12), X3 + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL (SI), CX + XORL 48(SI), R8 + XORL 32(SI), R9 + XORL 16(SI), AX + MOVL CX, (DI) + MOVL R8, 48(DI) + MOVL R9, 32(DI) + MOVL AX, 16(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 20(SI), CX + XORL 4(SI), R8 + XORL 52(SI), R9 + XORL 36(SI), AX + MOVL CX, 20(DI) + MOVL R8, 4(DI) + MOVL R9, 52(DI) + MOVL AX, 36(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 40(SI), CX + XORL 24(SI), R8 + XORL 8(SI), R9 + XORL 56(SI), AX + MOVL CX, 40(DI) + MOVL R8, 24(DI) + MOVL R9, 8(DI) + MOVL AX, 56(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + XORL 60(SI), CX + XORL 44(SI), R8 + XORL 28(SI), R9 + XORL 12(SI), AX + MOVL CX, 60(DI) + MOVL R8, 44(DI) + MOVL R9, 28(DI) + MOVL AX, 12(DI) + MOVQ 352(R12), R9 + MOVL 16(R12), CX + MOVL 36(R12), R8 + ADDQ $0x01, CX + SHLQ $0x20, R8 + ADDQ R8, CX + MOVQ CX, R8 + SHRQ $0x20, R8 + MOVL CX, 16(R12) + MOVL R8, 36(R12) + CMPQ R9, $0x40 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI, SI + MOVQ DX, DI + MOVQ R9, CX REP; MOVSB - BYTESATLEAST64: - DONE: + +BYTESATLEAST64: +DONE: RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 + +BYTESATLEAST65: + SUBQ $0x40, R9 + ADDQ $0x40, DI + ADDQ $0x40, SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 1ea9275b8..a01ef4357 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { // leftEncode returns max 9 bytes c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) 
c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) c.Write(bytepad(c.initBlock, c.rate)) return &c diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 3ca9e89e2..c0d1c29e6 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -510,8 +510,8 @@ userAuthLoop: if err := s.transport.writePacket(Marshal(discMsg)); err != nil { return nil, err } - - return nil, discMsg + authErrs = append(authErrs, discMsg) + return nil, &ServerAuthError{Errors: authErrs} } var userAuthReq userAuthRequestMsg diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 000000000..de58dfb8d --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). 
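Editor's note on the golang.org/x/crypto/ssh hunk above: after this change a handshake that ends with the client disconnecting is surfaced as a *ssh.ServerAuthError whose Errors slice also carries the disconnect message, rather than the raw disconnect being returned on its own. A hedged, minimal sketch of server code that inspects it; the wrapper name and logging are illustrative assumptions, not part of the diff:

    package sshexample

    import (
        "errors"
        "log"
        "net"

        "golang.org/x/crypto/ssh"
    )

    // handleConn is an illustrative handshake wrapper (assumption).
    func handleConn(nc net.Conn, cfg *ssh.ServerConfig) {
        defer nc.Close()
        _, chans, reqs, err := ssh.NewServerConn(nc, cfg)
        if err != nil {
            var authErr *ssh.ServerAuthError
            if errors.As(err, &authErr) {
                // With the change above this can include the client's
                // disconnect message as well as per-method auth failures.
                for _, e := range authErr.Errors {
                    log.Printf("auth attempt: %v", e)
                }
            }
            return
        }
        go ssh.DiscardRequests(reqs)
        for ch := range chans {
            ch.Reject(ssh.Prohibited, "no channels served in this sketch")
        }
    }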
+func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 000000000..e3784123c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. 
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 000000000..060fd6c64 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f3..7688c356b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
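fillNetHTTPConfig above is the bridge from the Go 1.24 net/http configuration surface into this package. A hedged sketch of what that looks like from application code (requires Go 1.24; the field names are taken from the code above, while the address, handler and certificate paths are assumptions):

    package main

    import (
        "net/http"
        "time"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("hello"))
        })

        srv := &http.Server{
            Addr:    ":8443",
            Handler: mux,
            // Per the precedence note in config.go, non-zero values here win
            // over the matching http2.Server fields.
            HTTP2: &http.HTTP2Config{
                MaxConcurrentStreams: 250,
                PingTimeout:          15 * time.Second,
            },
        }
        // HTTP/2 is negotiated over TLS; the key material is a placeholder.
        srv.ListenAndServeTLS("cert.pem", "key.pem")
    }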
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3ec..617b4a476 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
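The three Server fields introduced above (ReadIdleTimeout, PingTimeout, WriteByteTimeout) give HTTP/2 servers the keepalive and write-progress knobs the Transport already had. A hedged configuration sketch, assuming an x/net version that includes this change; the durations, address and certificate paths are illustrative:

    package main

    import (
        "log"
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

        h2s := &http2.Server{
            ReadIdleTimeout:  30 * time.Second, // send a PING after 30s with no frames from the peer
            PingTimeout:      15 * time.Second, // close the connection if the PING goes unanswered
            WriteByteTimeout: 10 * time.Second, // close the connection if writes make no progress
        }
        if err := http2.ConfigureServer(srv, h2s); err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }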
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f97..0c5f64aa8 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
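On the client side, the health check above is armed by Transport.ReadIdleTimeout and bounded by Transport.PingTimeout, both now routed through configFromTransport. A hedged sketch of enabling it; the target URL is an assumption:

    package main

    import (
        "log"
        "net/http"
        "time"

        "golang.org/x/net/http2"
    )

    func main() {
        t := &http2.Transport{
            ReadIdleTimeout:  30 * time.Second, // ping the server after 30s without any frame
            PingTimeout:      15 * time.Second, // drop the connection if the ping is unanswered
            WriteByteTimeout: 10 * time.Second, // fail writes that make no progress for 10s
        }
        client := &http.Client{Transport: t}

        resp, err := client.Get("https://example.com/")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println(resp.Proto, resp.Status)
    }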
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2199,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a..6ff6bee7e 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780e..ac76165ce 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb33217..109997d77 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 000000000..ec2acfe54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
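A short note on the oauth2 Token change above: ExpiresIn is only the raw wire value from the token response, and per its doc comment it is the application's job to turn it into an absolute Expiry. A hedged helper sketch, assuming an oauth2 version that includes the new field; receivedAt, the moment the token response was observed, is supplied by the caller:

    package oauth2example

    import (
        "time"

        "golang.org/x/oauth2"
    )

    // resolveExpiry fills Token.Expiry from the wire-format ExpiresIn field,
    // relative to when the token response was received. It is a no-op when
    // Expiry is already set or ExpiresIn is absent.
    func resolveExpiry(tok *oauth2.Token, receivedAt time.Time) {
        if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
            tok.Expiry = receivedAt.Add(time.Duration(tok.ExpiresIn) * time.Second)
        }
    }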
+ +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 000000000..b838cb9e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. +func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb1..32a44514e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. 
func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33ef..ce208ce6d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9f..170d21ddf 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 08f35ea17..f1caf0f78 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -110,7 +110,6 @@ func doinit() { ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 000000000..a0fd7e2f7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c5..600a68078 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 000000000..4d0888b0c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. 
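The Darwin changes above only affect how golang.org/x/sys/cpu initializes its exported feature flags; callers keep reading the same booleans. A hedged sketch of the caller side:

    package cpuexample

    import "golang.org/x/sys/cpu"

    // pickKernel shows the usual dispatch pattern. On Darwin, the AVX-512
    // flags are now set only when the sysctl and kernel-version checks in
    // cpu_darwin_x86.go pass; elsewhere the existing XGETBV/CPUID checks apply.
    func pickKernel() string {
        switch {
        case cpu.X86.HasAVX512F && cpu.X86.HasAVX512DQ:
            return "avx512"
        case cpu.X86.HasAVX2:
            return "avx2"
        default:
            return "generic"
        }
    }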
+ +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e1..6e08a76a7 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab..7ca4fa12a 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. 
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. 
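A hedged sketch tying together the new PTP helpers above: open a PTP character device, query its capabilities, and read its hardware clock through the dynamic clock ID that FdToClockID derives from the descriptor. Linux-only; the /dev/ptp0 path is an assumption:

    //go:build linux

    package ptpexample

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // readPTPClock is illustrative only; error handling is kept minimal.
    func readPTPClock() error {
        fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
        if err != nil {
            return err
        }
        defer unix.Close(fd)

        caps, err := unix.IoctlPtpClockGetcaps(fd)
        if err != nil {
            return err
        }
        fmt.Printf("ptp capabilities: %+v\n", *caps)

        var ts unix.Timespec
        if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
            return err
        }
        fmt.Printf("phc time: %d.%09d\n", ts.Sec, ts.Nsec)
        return nil
    }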
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index e14b766a3..6ab02b6c3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . #if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -656,7 +668,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -666,7 +678,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2..6f15ba1ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 3f1d3d4cb..230a94549 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75..745e5c7e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451..dd2262a40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, 
initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a28894..8cf3670bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1..7bf5c04bb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if 
err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. + if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 000000000..07ac8e09d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 000000000..297e97bce --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b246..ccba391c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2620,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2933,11 +2960,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3238,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3255,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3654,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168d..0c00cb3f3 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b592..dfb364554 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e8..d46dcf78a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 
0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8e..3af3248a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -235,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e7..292bcf028 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -233,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea780..782b7110f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 
= 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1e..84973fd92 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b4..6d9cbc3b2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6f..5f9fedbce 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 
0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cefb..bb0026ee0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a3..46120db5c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a5..5c951634f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61b..11a84d5af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c210..f78c4617c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + 
PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aace..aeb777c34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -234,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5adb..5cc1e8eb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f681..f485dbf45 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 
SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c232..1893e2fe8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf51..16a4017da 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a55..a5459e766 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9f2550dc3..8daaf3faf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1724,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - 
NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1768,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1797,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1829,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1897,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1949,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -3766,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3987,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4082,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + 
PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4609,7 +4745,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5349,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af46..2e5d5a443 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba..4e613cf63 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. 
func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a314..4510bfc3f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c..51311e205 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2203,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. 
+const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. 
See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc0..6f5252880 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), 
uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md index d03d0aefe..05ff623f9 100644 --- a/vendor/golang.org/x/term/README.md +++ b/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f56060..df6bf948e 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493..93a798ab6 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index f3426bd3c..fd2e72c12 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
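A short sketch of the behavioral effect of the x/time/rate hunk above, as I read it: NewLimiter now starts with a full token bucket (tokens initialized to float64(b)), and the removed limit==0 special case no longer decrements the configured burst. The zero-rate setup and printed output below are illustrative only, not taken from the diff.

// Illustrative sketch, assuming the post-change semantics described above.
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Zero rate, burst of 3: the bucket starts full, so exactly three events
	// are allowed and further ones are denied, while Burst() keeps reporting
	// the configured value instead of being mutated by the old special case.
	lim := rate.NewLimiter(0, 3)
	for i := 0; i < 4; i++ {
		fmt.Printf("event %d allowed: %v\n", i, lim.Allow())
	}
	fmt.Println("burst still reported as:", lim.Burst())
}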
-const Version = "0.195.0" +const Version = "0.203.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 597daf0a7..09b7f6487 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -32,6 +32,11 @@ "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", "location": "europe-west3" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", @@ -41,9 +46,54 @@ "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west4.rep.googleapis.com/", + "location": "us-west4" } ], - "etag": "\"38363036373236373330353534313035333932\"", + "etag": "\"3132333635343336333933383332343134323139\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1029,6 +1079,34 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "relocate": { + "description": "Initiates a long-running Relocate Bucket operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.buckets.relocate", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket to be moved.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/relocate", + "request": { + "$ref": "RelocateBucketRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "restore": { "description": "Restores a soft-deleted bucket.", "httpMethod": "POST", @@ -2829,6 +2907,11 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. 
This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "softDeleted": { "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", @@ -3304,6 +3387,11 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate sof-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -3781,6 +3869,38 @@ }, "operations": { "methods": { + "advanceRelocateBucket": { + "description": "Starts asynchronous advancement of the relocate bucket operation in the case of required write downtime, to allow it to lock the bucket at the source location, and proceed with the bucket location swap. The server makes a best effort to advance the relocate bucket operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.advanceRelocateBucket", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket to advance the relocate for.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/advanceRelocateBucket", + "request": { + "$ref": "AdvanceRelocateBucketOperationRequest" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "cancel": { "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", "httpMethod": "POST", @@ -4136,9 +4256,26 @@ } } }, - "revision": "20240819", + "revision": "20241008", "rootUrl": "https://storage.googleapis.com/", "schemas": { + "AdvanceRelocateBucketOperationRequest": { + "description": "An AdvanceRelocateBucketOperation request.", + "id": "AdvanceRelocateBucketOperationRequest", + "properties": { + "expireTime": { + "description": "Specifies the time when the relocation will revert to the sync stage if the relocation hasn't succeeded.", + "format": "date-time", + "type": "string" + }, + "ttl": { + "description": "Specifies the duration after which the relocation will revert to the sync stage if the relocation hasn't succeeded. Optional, if not supplied, a default value of 12h will be used.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "AnywhereCache": { "description": "An Anywhere Cache instance.", "id": "AnywhereCache", @@ -5597,6 +5734,10 @@ }, "type": "object" }, + "restoreToken": { + "description": "Restore token used to differentiate deleted objects with the same name and generation. 
This field is only returned for deleted objects in hierarchical namespace buckets.", + "type": "string" + }, "retention": { "description": "A collection of object level retention parameters.", "properties": { @@ -5865,6 +6006,34 @@ }, "type": "object" }, + "RelocateBucketRequest": { + "description": "A Relocate Bucket request.", + "id": "RelocateBucketRequest", + "properties": { + "destinationCustomPlacementConfig": { + "description": "The bucket's new custom placement configuration if relocating to a Custom Dual Region.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "destinationLocation": { + "description": "The new location the bucket will be relocated to.", + "type": "string" + }, + "validateOnly": { + "description": "If true, validate the operation, but do not actually relocate the bucket.", + "type": "boolean" + } + }, + "type": "object" + }, "RewriteResponse": { "description": "A rewrite response.", "id": "RewriteResponse", diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index b16e3f227..2c11b2d8d 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -343,6 +343,34 @@ type ProjectsServiceAccountService struct { s *Service } +// AdvanceRelocateBucketOperationRequest: An AdvanceRelocateBucketOperation +// request. +type AdvanceRelocateBucketOperationRequest struct { + // ExpireTime: Specifies the time when the relocation will revert to the sync + // stage if the relocation hasn't succeeded. + ExpireTime string `json:"expireTime,omitempty"` + // Ttl: Specifies the duration after which the relocation will revert to the + // sync stage if the relocation hasn't succeeded. Optional, if not supplied, a + // default value of 12h will be used. + Ttl string `json:"ttl,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExpireTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ExpireTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AdvanceRelocateBucketOperationRequest) MarshalJSON() ([]byte, error) { + type NoMethod AdvanceRelocateBucketOperationRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // AnywhereCache: An Anywhere Cache instance. type AnywhereCache struct { // AdmissionPolicy: The cache-level entry admission policy. @@ -2236,6 +2264,10 @@ type Object struct { // Owner: The owner of the object. This will always be the uploader of the // object. Owner *ObjectOwner `json:"owner,omitempty"` + // RestoreToken: Restore token used to differentiate deleted objects with the + // same name and generation. This field is only returned for deleted objects in + // hierarchical namespace buckets. + RestoreToken string `json:"restoreToken,omitempty"` // Retention: A collection of object level retention parameters. 
Retention *ObjectRetention `json:"retention,omitempty"` // RetentionExpirationTime: A server-determined value that specifies the @@ -2639,6 +2671,59 @@ func (s PolicyBindings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// RelocateBucketRequest: A Relocate Bucket request. +type RelocateBucketRequest struct { + // DestinationCustomPlacementConfig: The bucket's new custom placement + // configuration if relocating to a Custom Dual Region. + DestinationCustomPlacementConfig *RelocateBucketRequestDestinationCustomPlacementConfig `json:"destinationCustomPlacementConfig,omitempty"` + // DestinationLocation: The new location the bucket will be relocated to. + DestinationLocation string `json:"destinationLocation,omitempty"` + // ValidateOnly: If true, validate the operation, but do not actually relocate + // the bucket. + ValidateOnly bool `json:"validateOnly,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. + NullFields []string `json:"-"` +} + +func (s RelocateBucketRequest) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RelocateBucketRequestDestinationCustomPlacementConfig: The bucket's new +// custom placement configuration if relocating to a Custom Dual Region. +type RelocateBucketRequestDestinationCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RelocateBucketRequestDestinationCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequestDestinationCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // RewriteResponse: A rewrite response. 
type RewriteResponse struct { // Done: true if the copy is finished; otherwise, false if the copy is in @@ -5330,6 +5415,109 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return ret, nil } +type BucketsRelocateCall struct { + s *Service + bucket string + relocatebucketrequest *RelocateBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Relocate: Initiates a long-running Relocate Bucket operation on the +// specified bucket. +// +// - bucket: Name of the bucket to be moved. +func (r *BucketsService) Relocate(bucket string, relocatebucketrequest *RelocateBucketRequest) *BucketsRelocateCall { + c := &BucketsRelocateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.relocatebucketrequest = relocatebucketrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRelocateCall) Fields(s ...googleapi.Field) *BucketsRelocateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRelocateCall) Context(ctx context.Context) *BucketsRelocateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRelocateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/relocate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.relocate" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + type BucketsRestoreCall struct { s *Service bucket string @@ -9961,6 +10149,17 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. +// Only applicable for hierarchical namespace buckets and if softDeleted is set +// to true. This parameter is optional, and is only required in the rare case +// when there are multiple soft-deleted objects with the same name and +// generation. +func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more // information, see Soft Delete @@ -11056,6 +11255,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate sof-deleted objects with the same name and generation. Only +// applicable for hierarchical namespace buckets. This parameter is optional, +// and is only required in the rare case when there are multiple soft-deleted +// objects with the same name and generation. +func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { @@ -12074,6 +12283,92 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) return ret, nil } +type OperationsAdvanceRelocateBucketCall struct { + s *Service + bucket string + operationId string + advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AdvanceRelocateBucket: Starts asynchronous advancement of the relocate +// bucket operation in the case of required write downtime, to allow it to lock +// the bucket at the source location, and proceed with the bucket location +// swap. The server makes a best effort to advance the relocate bucket +// operation, but success is not guaranteed. +// +// - bucket: Name of the bucket to advance the relocate for. +// - operationId: ID of the operation resource. 
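For orientation, the hunks above add a client surface for bucket relocation (storage.buckets.relocate), the restoreToken parameter on objects.get and objects.restore, and the buckets.operations.advanceRelocateBucket call. The following is a minimal sketch of how that surface might be exercised; it is not part of the vendored diff. It assumes the generated Service exposes Buckets, Objects, and Operations sub-services by the usual generator convention, that Application Default Credentials are available, and that every bucket, object, token, and operation identifier below is a placeholder.

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Relies on Application Default Credentials; svc.Buckets, svc.Objects, and
	// svc.Operations are the generated sub-service fields (assumed names).
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Dry-run a bucket relocation; the call returns a long-running operation.
	op, err := svc.Buckets.Relocate("example-bucket", &storage.RelocateBucketRequest{
		DestinationLocation: "US-EAST1",
		ValidateOnly:        true,
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("relocate operation: %s", op.Name)

	// If the relocation later waits for write downtime, advance it explicitly.
	// "example-operation-id" and the 12h TTL value are placeholders.
	err = svc.Operations.AdvanceRelocateBucket("example-bucket", "example-operation-id",
		&storage.AdvanceRelocateBucketOperationRequest{Ttl: "43200s"}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Use the new restoreToken parameter to pick one of several soft-deleted
	// generations that share a name; the token itself is a placeholder.
	obj, err := svc.Objects.Get("example-bucket", "example-object").
		SoftDeleted(true).
		RestoreToken("example-restore-token").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("soft-deleted generation: %d", obj.Generation)
}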
+func (r *OperationsService) AdvanceRelocateBucket(bucket string, operationId string, advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest) *OperationsAdvanceRelocateBucketCall { + c := &OperationsAdvanceRelocateBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + c.advancerelocatebucketoperationrequest = advancerelocatebucketoperationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsAdvanceRelocateBucketCall) Fields(s ...googleapi.Field) *OperationsAdvanceRelocateBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsAdvanceRelocateBucketCall) Context(ctx context.Context) *OperationsAdvanceRelocateBucketCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/advanceRelocateBucket") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.advanceRelocateBucket" call. +func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + type OperationsCancelCall struct { s *Service bucket string diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index d2a4f7664..ff3539d89 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -247,6 +247,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) return pool, err } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 2e2b15c6e..d5b213e0f 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -130,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. 
DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 000000000..6e01be017 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,892 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/distribution.proto + +package distribution + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `Distribution` contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those values +// across a set of buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by formulas for buckets of fixed or exponentially increasing +// widths. +// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in `bucket_counts` if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd edition + // describes Welford's method for accumulating this sum in one pass. 
+ // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. If the distribution does not + // contain a histogram, then omit this field. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // The number of values in each bucket of the histogram, as described in + // `bucket_options`. If the distribution does not have a histogram, then omit + // this field. If there is a histogram, then the sum of the values in + // `bucket_counts` must equal the value in the `count` field of the + // distribution. + // + // If present, `bucket_counts` should contain N values, where N is the number + // of buckets specified in `bucket_options`. If you supply fewer than N + // values, the remaining values are assumed to be 0. + // + // The order of the values in `bucket_counts` follows the bucket numbering + // schemes described for the three bucket types. The first value must be the + // count for the underflow bucket (number 0). The next N-2 values are the + // counts for the finite buckets (number 1 through N-2). The N'th value in + // `bucket_counts` is the count for the overflow bucket (number N-1). + BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // Must be in increasing order of `value` field. + Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (x *Distribution) Reset() { + *x = Distribution{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution) ProtoMessage() {} + +func (x *Distribution) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution.ProtoReflect.Descriptor instead. 
+func (*Distribution) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0} +} + +func (x *Distribution) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Distribution) GetMean() float64 { + if x != nil { + return x.Mean + } + return 0 +} + +func (x *Distribution) GetSumOfSquaredDeviation() float64 { + if x != nil { + return x.SumOfSquaredDeviation + } + return 0 +} + +func (x *Distribution) GetRange() *Distribution_Range { + if x != nil { + return x.Range + } + return nil +} + +func (x *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if x != nil { + return x.BucketOptions + } + return nil +} + +func (x *Distribution) GetBucketCounts() []int64 { + if x != nil { + return x.BucketCounts + } + return nil +} + +func (x *Distribution) GetExemplars() []*Distribution_Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. + Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Distribution_Range) Reset() { + *x = Distribution_Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Range) ProtoMessage() {} + +func (x *Distribution_Range) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Range.ProtoReflect.Descriptor instead. +func (*Distribution_Range) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Distribution_Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Distribution_Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// `BucketOptions` describes the bucket boundaries used to create a histogram +// for the distribution. The buckets can be in a linear sequence, an +// exponential sequence, or each bucket can be specified explicitly. +// `BucketOptions` does not include the number of values in each bucket. +// +// A bucket has an inclusive lower bound and exclusive upper bound for the +// values that are counted for that bucket. The upper bound of a bucket must +// be strictly greater than the lower bound. The sequence of N buckets for a +// distribution consists of an underflow bucket (number 0), zero or more +// finite buckets (number 1 through N - 2) and an overflow bucket (number N - +// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the +// same as the upper bound of bucket i - 1. The buckets span the whole range +// of finite values: lower bound of the underflow bucket is -infinity and the +// upper bound of the overflow bucket is +infinity. The finite buckets are +// so-called because both bounds are finite. 
+type Distribution_BucketOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Exactly one of these three fields must be set. + // + // Types that are assignable to Options: + // + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` +} + +func (x *Distribution_BucketOptions) Reset() { + *x = Distribution_BucketOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions) ProtoMessage() {} + +func (x *Distribution_BucketOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1} +} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (x *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + // The linear bucket. + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + // The exponential buckets. + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + // The explicit buckets. + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +// Exemplars are example points that may be used to annotate aggregated +// distribution values. 
They are metadata that gives information about a +// particular value added to a Distribution bucket, such as a trace ID that +// was active when a value was added. They may contain further information, +// such as a example values and timestamps, origin, etc. +type Distribution_Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Value of the exemplar point. This value determines to which bucket the + // exemplar belongs. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. Examples are: + // + // Trace: type.googleapis.com/google.monitoring.v3.SpanContext + // + // Literal string: type.googleapis.com/google.protobuf.StringValue + // + // Labels dropped during aggregation: + // type.googleapis.com/google.monitoring.v3.DroppedLabels + // + // There may be only a single attachment of any given message type in a + // single exemplar, and this is enforced by the system. + Attachments []*anypb.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` +} + +func (x *Distribution_Exemplar) Reset() { + *x = Distribution_Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Exemplar) ProtoMessage() {} + +func (x *Distribution_Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Exemplar.ProtoReflect.Descriptor instead. +func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Distribution_Exemplar) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Distribution_Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { + if x != nil { + return x.Attachments + } + return nil +} + +// Specifies a linear sequence of buckets that all have the same width +// (except overflow and underflow). Each bucket represents a constant +// absolute uncertainty on the specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): offset + (width * i). +// +// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. 
+ Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` +} + +func (x *Distribution_BucketOptions_Linear) Reset() { + *x = Distribution_BucketOptions_Linear{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Linear) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Linear) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Linear.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (x *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetWidth() float64 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { + if x != nil { + return x.Offset + } + return 0 +} + +// Specifies an exponential sequence of buckets that have a width that is +// proportional to the value of the lower bound. Each bucket represents a +// constant relative uncertainty on a specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"` +} + +func (x *Distribution_BucketOptions_Exponential) Reset() { + *x = Distribution_BucketOptions_Exponential{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Exponential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Exponential) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Exponential.ProtoReflect.Descriptor instead. 
+func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { + if x != nil { + return x.GrowthFactor + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetScale() float64 { + if x != nil { + return x.Scale + } + return 0 +} + +// Specifies a set of buckets with arbitrary widths. +// +// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following +// boundaries: +// +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] +// +// The `bounds` field must contain at least one element. If `bounds` has +// only one element, then there are no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +type Distribution_BucketOptions_Explicit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values must be monotonically increasing. + Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` +} + +func (x *Distribution_BucketOptions_Explicit) Reset() { + *x = Distribution_BucketOptions_Explicit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Explicit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Explicit) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Explicit.ProtoReflect.Descriptor instead. 
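The Linear, Exponential, and Explicit bucket options above define histogram boundaries by formula rather than by listing every edge. As a quick orientation (not part of the vendored file; the numbers are invented), a linear layout with num_finite_buckets = 3, width = 10, offset = 0 yields five buckets, (-inf, 0), [0, 10), [10, 20), [20, 30), and [30, +inf), and bucket_counts must carry one count per bucket in that order, summing to count. A small sketch using the generated types:

package main

import (
	"fmt"

	distribution "google.golang.org/genproto/googleapis/api/distribution"
)

func main() {
	// num_finite_buckets = 3, width = 10, offset = 0 gives N = 5 buckets:
	// underflow (-inf, 0), [0, 10), [10, 20), [20, 30), overflow [30, +inf).
	d := &distribution.Distribution{
		Count: 4,
		Mean:  12.5, // invented value for illustration
		BucketOptions: &distribution.Distribution_BucketOptions{
			Options: &distribution.Distribution_BucketOptions_LinearBuckets{
				LinearBuckets: &distribution.Distribution_BucketOptions_Linear{
					NumFiniteBuckets: 3,
					Width:            10,
					Offset:           0,
				},
			},
		},
		// One value in the underflow bucket, two in [10, 20), one in overflow.
		BucketCounts: []int64{1, 0, 2, 0, 1},
	}

	// The histogram counts must sum to Count.
	var total int64
	for _, c := range d.BucketCounts {
		total += c
	}
	fmt.Println(total == d.GetCount()) // true
	fmt.Println(d.GetBucketOptions().GetLinearBuckets().GetWidth())
}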
+func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if x != nil { + return x.Bounds + } + return nil +} + +var File_google_api_distribution_proto protoreflect.FileDescriptor + +var file_google_api_distribution_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x08, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x6d, 0x65, + 0x61, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x75, 0x6d, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x71, 0x75, + 0x61, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x75, 0x6d, 0x4f, 0x66, 0x53, 0x71, 0x75, 0x61, 0x72, + 0x65, 0x64, 0x44, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x1a, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x61, 0x78, 0x1a, 0xb9, 0x04, 0x0a, 0x0d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x12, 0x56, 0x0a, 0x0e, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x65, 0x0a, + 0x13, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x00, + 0x52, 0x12, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x1a, 0x64, 0x0a, 0x06, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, + 0x69, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, + 0x64, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x1a, 0x76, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, + 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x67, 0x72, + 0x6f, 0x77, 0x74, 0x68, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x1a, 0x22, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x92, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x0b, + 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x42, 0x71, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_distribution_proto_rawDescOnce sync.Once + file_google_api_distribution_proto_rawDescData = file_google_api_distribution_proto_rawDesc +) + +func file_google_api_distribution_proto_rawDescGZIP() []byte { + file_google_api_distribution_proto_rawDescOnce.Do(func() { + file_google_api_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_distribution_proto_rawDescData) + }) + return file_google_api_distribution_proto_rawDescData +} + +var file_google_api_distribution_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_api_distribution_proto_goTypes = []interface{}{ + (*Distribution)(nil), // 0: google.api.Distribution + (*Distribution_Range)(nil), // 1: google.api.Distribution.Range + (*Distribution_BucketOptions)(nil), // 2: google.api.Distribution.BucketOptions + (*Distribution_Exemplar)(nil), // 3: google.api.Distribution.Exemplar + (*Distribution_BucketOptions_Linear)(nil), // 4: google.api.Distribution.BucketOptions.Linear + (*Distribution_BucketOptions_Exponential)(nil), // 5: google.api.Distribution.BucketOptions.Exponential + (*Distribution_BucketOptions_Explicit)(nil), // 6: google.api.Distribution.BucketOptions.Explicit + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*anypb.Any)(nil), // 8: google.protobuf.Any +} +var file_google_api_distribution_proto_depIdxs = []int32{ + 1, // 0: google.api.Distribution.range:type_name -> google.api.Distribution.Range + 2, // 1: google.api.Distribution.bucket_options:type_name -> google.api.Distribution.BucketOptions + 3, // 2: google.api.Distribution.exemplars:type_name -> google.api.Distribution.Exemplar + 4, // 3: google.api.Distribution.BucketOptions.linear_buckets:type_name -> google.api.Distribution.BucketOptions.Linear + 5, // 4: google.api.Distribution.BucketOptions.exponential_buckets:type_name -> google.api.Distribution.BucketOptions.Exponential + 6, // 5: google.api.Distribution.BucketOptions.explicit_buckets:type_name -> 
google.api.Distribution.BucketOptions.Explicit + 7, // 6: google.api.Distribution.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 8, // 7: google.api.Distribution.Exemplar.attachments:type_name -> google.protobuf.Any + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_google_api_distribution_proto_init() } +func file_google_api_distribution_proto_init() { + if File_google_api_distribution_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_distribution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Linear); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Exponential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Explicit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_distribution_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_distribution_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_distribution_proto_goTypes, + DependencyIndexes: file_google_api_distribution_proto_depIdxs, + MessageInfos: file_google_api_distribution_proto_msgTypes, + }.Build() + File_google_api_distribution_proto = out.File + file_google_api_distribution_proto_rawDesc = nil + file_google_api_distribution_proto_goTypes = nil + 
file_google_api_distribution_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 000000000..42bcacc36 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,249 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/label.proto + +package label + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +// Enum value maps for LabelDescriptor_ValueType. +var ( + LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", + } + LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, + } +) + +func (x LabelDescriptor_ValueType) Enum() *LabelDescriptor_ValueType { + p := new(LabelDescriptor_ValueType) + *p = x + return p +} + +func (x LabelDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LabelDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_label_proto_enumTypes[0].Descriptor() +} + +func (LabelDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_label_proto_enumTypes[0] +} + +func (x LabelDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LabelDescriptor_ValueType.Descriptor instead. +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. 
+ ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *LabelDescriptor) Reset() { + *x = LabelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_label_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelDescriptor) ProtoMessage() {} + +func (x *LabelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_label_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelDescriptor.ProtoReflect.Descriptor instead. +func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0} +} + +func (x *LabelDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return LabelDescriptor_STRING +} + +func (x *LabelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_google_api_label_proto protoreflect.FileDescriptor + +var file_google_api_label_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x22, 0xb9, 0x01, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, + 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, + 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 
0x3b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, + 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_label_proto_rawDescOnce sync.Once + file_google_api_label_proto_rawDescData = file_google_api_label_proto_rawDesc +) + +func file_google_api_label_proto_rawDescGZIP() []byte { + file_google_api_label_proto_rawDescOnce.Do(func() { + file_google_api_label_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_label_proto_rawDescData) + }) + return file_google_api_label_proto_rawDescData +} + +var file_google_api_label_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_label_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_label_proto_goTypes = []interface{}{ + (LabelDescriptor_ValueType)(0), // 0: google.api.LabelDescriptor.ValueType + (*LabelDescriptor)(nil), // 1: google.api.LabelDescriptor +} +var file_google_api_label_proto_depIdxs = []int32{ + 0, // 0: google.api.LabelDescriptor.value_type:type_name -> google.api.LabelDescriptor.ValueType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_label_proto_init() } +func file_google_api_label_proto_init() { + if File_google_api_label_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_label_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_label_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_label_proto_goTypes, + DependencyIndexes: file_google_api_label_proto_depIdxs, + EnumInfos: file_google_api_label_proto_enumTypes, + MessageInfos: file_google_api_label_proto_msgTypes, + }.Build() + File_google_api_label_proto = out.File + file_google_api_label_proto_rawDesc = nil + file_google_api_label_proto_goTypes = nil + file_google_api_label_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 000000000..d4b89c98d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,771 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/metric.proto + +package metric + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The kind of measurement. It describes how the data is reported. +// For information on setting the start time and end time based on +// the MetricKind, see [TimeInterval][google.monitoring.v3.TimeInterval]. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. + MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +// Enum value maps for MetricDescriptor_MetricKind. +var ( + MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", + } + MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, + } +) + +func (x MetricDescriptor_MetricKind) Enum() *MetricDescriptor_MetricKind { + p := new(MetricDescriptor_MetricKind) + *p = x + return p +} + +func (x MetricDescriptor_MetricKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_MetricKind) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[0].Descriptor() +} + +func (MetricDescriptor_MetricKind) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[0] +} + +func (x MetricDescriptor_MetricKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_MetricKind.Descriptor instead. +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. 
+ MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +// Enum value maps for MetricDescriptor_ValueType. +var ( + MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", + } + MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, + } +) + +func (x MetricDescriptor_ValueType) Enum() *MetricDescriptor_ValueType { + p := new(MetricDescriptor_ValueType) + *p = x + return p +} + +func (x MetricDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[1].Descriptor() +} + +func (MetricDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[1] +} + +func (x MetricDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_ValueType.Descriptor instead. +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types should + // use a natural hierarchical grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. 
+ ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. + // + // Different systems might scale the values to be more easily displayed (so a + // value of `0.02kBy` _might_ be displayed as `20By`, and a value of + // `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is + // `kBy`, then the value of the metric is always in thousands of bytes, no + // matter how it might be displayed. + // + // If you want a custom metric to record the exact number of CPU-seconds used + // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is + // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 + // CPU-seconds, then the value is written as `12005`. + // + // Alternatively, if you want a custom metric to record data in a more + // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is + // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), + // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). + // + // The supported units are a subset of [The Unified Code for Units of + // Measure](https://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // * `1` dimensionless + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10^3) + // * `M` mega (10^6) + // * `G` giga (10^9) + // * `T` tera (10^12) + // * `P` peta (10^15) + // * `E` exa (10^18) + // * `Z` zetta (10^21) + // * `Y` yotta (10^24) + // + // * `m` milli (10^-3) + // * `u` micro (10^-6) + // * `n` nano (10^-9) + // * `p` pico (10^-12) + // * `f` femto (10^-15) + // * `a` atto (10^-18) + // * `z` zepto (10^-21) + // * `y` yocto (10^-24) + // + // * `Ki` kibi (2^10) + // * `Mi` mebi (2^20) + // * `Gi` gibi (2^30) + // * `Ti` tebi (2^40) + // * `Pi` pebi (2^50) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // - `/` division or ratio (as an infix operator). For examples, + // `kBy/{email}` or `MiBy/10ms` (although you should almost never + // have `/s` in a metric `unit`; rates should always be computed at + // query time from the underlying cumulative or delta value). + // - `.` multiplication or composition (as an infix operator). For + // examples, `GBy.d` or `k{watt}.h`. + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // - `Annotation` is just a comment if it follows a `UNIT`. If the annotation + // is used alone, then the unit is equivalent to `1`. For examples, + // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. + // - `NAME` is a sequence of non-blank printable ASCII characters not + // containing `{` or `}`. + // - `1` represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such + // as in `1/s`. It is typically used when none of the basic units are + // appropriate. 
For example, "new users per day" can be represented as + // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). + // - `%` represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of 0..100, + // and a metric value `3` means "3 percent"). + // - `10^2.%` indicates a metric contains a ratio, typically in the range + // 0..1, that will be multiplied by 100 and displayed as a percentage + // (so a metric value `0.03` means "3 percent"). + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. + DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. Metadata which can be used to guide usage of the metric. + Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional. The launch stage of the metric definition. + LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // Read-only. If present, then a [time + // series][google.monitoring.v3.TimeSeries], which is identified partially by + // a metric type and a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that + // is associated with this metric type can only be associated with one of the + // monitored resource types listed here. + MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` +} + +func (x *MetricDescriptor) Reset() { + *x = MetricDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor) ProtoMessage() {} + +func (x *MetricDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor.ProtoReflect.Descriptor instead. 
+func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MetricDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (x *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +func (x *MetricDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MetricDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MetricDescriptor) GetMetadata() *MetricDescriptor_MetricDescriptorMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *MetricDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetMonitoredResourceTypes() []string { + if x != nil { + return x.MonitoredResourceTypes + } + return nil +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An existing metric type, see + // [google.api.MetricDescriptor][google.api.MetricDescriptor]. For example, + // `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *Metric) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Metric) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Additional annotations that can be used to guide the usage of a metric. 
+type MetricDescriptor_MetricDescriptorMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated. Must use the + // [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] + // instead. + // + // Deprecated: Do not use. + LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // The sampling period of metric data points. For metrics which are written + // periodically, consecutive data points are stored at this time interval, + // excluding data loss due to errors. Metrics with a higher granularity have + // a smaller sampling period. + SamplePeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` + // The delay of data points caused by ingestion. Data points older than this + // age are guaranteed to be ingested and available to be read, excluding + // data loss due to errors. + IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { + *x = MetricDescriptor_MetricDescriptorMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor_MetricDescriptorMetadata) ProtoMessage() {} + +func (x *MetricDescriptor_MetricDescriptorMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata.ProtoReflect.Descriptor instead. +func (*MetricDescriptor_MetricDescriptorMetadata) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// Deprecated: Do not use. 
+func (x *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *durationpb.Duration { + if x != nil { + return x.SamplePeriod + } + return nil +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb.Duration { + if x != nil { + return x.IngestDelay + } + return nil +} + +var File_google_api_metric_proto protoreflect.FileDescriptor + +var file_google_api_metric_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, + 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x48, + 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 
0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, + 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, + 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, + 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, + 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x10, 0x04, 0x12, 
0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, + 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, + 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_metric_proto_rawDescOnce sync.Once + file_google_api_metric_proto_rawDescData = file_google_api_metric_proto_rawDesc +) + +func file_google_api_metric_proto_rawDescGZIP() []byte { + file_google_api_metric_proto_rawDescOnce.Do(func() { + file_google_api_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_metric_proto_rawDescData) + }) + return file_google_api_metric_proto_rawDescData +} + +var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_api_metric_proto_goTypes = []interface{}{ + (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind + (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType + (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor + (*Metric)(nil), // 3: google.api.Metric + (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata + nil, // 5: google.api.Metric.LabelsEntry + (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_google_api_metric_proto_depIdxs = []int32{ + 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor + 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata + 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage + 5, // 5: google.api.Metric.labels:type_name -> 
google.api.Metric.LabelsEntry + 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage + 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration + 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_api_metric_proto_init() } +func file_google_api_metric_proto_init() { + if File_google_api_metric_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor_MetricDescriptorMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_metric_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_metric_proto_goTypes, + DependencyIndexes: file_google_api_metric_proto_depIdxs, + EnumInfos: file_google_api_metric_proto_enumTypes, + MessageInfos: file_google_api_metric_proto_msgTypes, + }.Build() + File_google_api_metric_proto = out.File + file_google_api_metric_proto_rawDesc = nil + file_google_api_metric_proto_goTypes = nil + file_google_api_metric_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 000000000..b4cee2980 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,476 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/monitored_resource.proto + +package monitoredres + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An object that describes the schema of a +// [MonitoredResource][google.api.MonitoredResource] object using a type name +// and a set of labels. For example, the monitored resource descriptor for +// Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // + // For a list of types, see [Monitored resource + // types](https://cloud.google.com/monitoring/api/resources) + // + // and [Logging resource + // types](https://cloud.google.com/logging/docs/api/v2/resource-list). + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // Optional. The launch stage of the monitored resource definition. 
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *MonitoredResourceDescriptor) Reset() { + *x = MonitoredResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceDescriptor) ProtoMessage() {} + +func (x *MonitoredResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceDescriptor.ProtoReflect.Descriptor instead. +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *MonitoredResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object +// that describes the resource's schema. Information in the `labels` field +// identifies the actual resource and its attributes according to the schema. +// For example, a particular Compute Engine VM instance could be represented by +// the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for +// `"gce_instance"` has labels +// `"project_id"`, `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "project_id": "my-project", +// "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +type MonitoredResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource type. This field must match + // the `type` field of a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + // object. For example, the type of a Compute Engine VM instance is + // `gce_instance`. Some descriptors include the service name in the type; for + // example, the type of a Datastream stream is + // `datastream.googleapis.com/Stream`. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Required. 
Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Compute Engine VM instances use the + // labels `"project_id"`, `"instance_id"`, and `"zone"`. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResource) Reset() { + *x = MonitoredResource{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResource) ProtoMessage() {} + +func (x *MonitoredResource) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResource.ProtoReflect.Descriptor instead. +func (*MonitoredResource) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *MonitoredResource) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResource) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] +// object. [MonitoredResource][google.api.MonitoredResource] objects contain the +// minimum set of information to uniquely identify a monitored resource +// instance. There is some other useful auxiliary metadata. Monitoring and +// Logging use an ingestion pipeline to extract metadata for cloud resources of +// all types, and store the metadata in this message. +type MonitoredResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. Values for predefined system metadata labels. + // System labels are a kind of metadata extracted by Google, including + // "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. 
+ UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResourceMetadata) Reset() { + *x = MonitoredResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceMetadata) ProtoMessage() {} + +func (x *MonitoredResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceMetadata.ProtoReflect.Descriptor instead. +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{2} +} + +func (x *MonitoredResourceMetadata) GetSystemLabels() *structpb.Struct { + if x != nil { + return x.SystemLabels + } + return nil +} + +func (x *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +var File_google_api_monitored_resource_proto protoreflect.FileDescriptor + +var file_google_api_monitored_resource_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, + 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x1b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x41, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, + 0x19, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0c, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x56, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x79, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x16, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 
0x64, + 0x72, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x72, 0x65, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_monitored_resource_proto_rawDescOnce sync.Once + file_google_api_monitored_resource_proto_rawDescData = file_google_api_monitored_resource_proto_rawDesc +) + +func file_google_api_monitored_resource_proto_rawDescGZIP() []byte { + file_google_api_monitored_resource_proto_rawDescOnce.Do(func() { + file_google_api_monitored_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_monitored_resource_proto_rawDescData) + }) + return file_google_api_monitored_resource_proto_rawDescData +} + +var file_google_api_monitored_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_api_monitored_resource_proto_goTypes = []interface{}{ + (*MonitoredResourceDescriptor)(nil), // 0: google.api.MonitoredResourceDescriptor + (*MonitoredResource)(nil), // 1: google.api.MonitoredResource + (*MonitoredResourceMetadata)(nil), // 2: google.api.MonitoredResourceMetadata + nil, // 3: google.api.MonitoredResource.LabelsEntry + nil, // 4: google.api.MonitoredResourceMetadata.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (api.LaunchStage)(0), // 6: google.api.LaunchStage + (*structpb.Struct)(nil), // 7: google.protobuf.Struct +} +var file_google_api_monitored_resource_proto_depIdxs = []int32{ + 5, // 0: google.api.MonitoredResourceDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: google.api.MonitoredResourceDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 2: google.api.MonitoredResource.labels:type_name -> google.api.MonitoredResource.LabelsEntry + 7, // 3: google.api.MonitoredResourceMetadata.system_labels:type_name -> google.protobuf.Struct + 4, // 4: google.api.MonitoredResourceMetadata.user_labels:type_name -> google.api.MonitoredResourceMetadata.UserLabelsEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_api_monitored_resource_proto_init() } +func file_google_api_monitored_resource_proto_init() { + if File_google_api_monitored_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_monitored_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_google_api_monitored_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_monitored_resource_proto_goTypes, + DependencyIndexes: file_google_api_monitored_resource_proto_depIdxs, + MessageInfos: file_google_api_monitored_resource_proto_msgTypes, + }.Build() + File_google_api_monitored_resource_proto = out.File + file_google_api_monitored_resource_proto_rawDesc = nil + file_google_api_monitored_resource_proto_goTypes = nil + file_google_api_monitored_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go new file mode 100644 index 000000000..cae02bce1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -0,0 +1,189 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/calendar_period.proto + +package calendarperiod + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `CalendarPeriod` represents the abstract concept of a time period that has +// a canonical start. Grammatically, "the start of the current +// `CalendarPeriod`." All calendar times begin at midnight UTC. +type CalendarPeriod int32 + +const ( + // Undefined period, raises an error. + CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED CalendarPeriod = 0 + // A day. + CalendarPeriod_DAY CalendarPeriod = 1 + // A week. Weeks begin on Monday, following + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_WEEK CalendarPeriod = 2 + // A fortnight. The first calendar fortnight of the year begins at the start + // of week 1 according to + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_FORTNIGHT CalendarPeriod = 3 + // A month. + CalendarPeriod_MONTH CalendarPeriod = 4 + // A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each + // year. + CalendarPeriod_QUARTER CalendarPeriod = 5 + // A half-year. Half-years start on dates 1-Jan and 1-Jul. + CalendarPeriod_HALF CalendarPeriod = 6 + // A year. + CalendarPeriod_YEAR CalendarPeriod = 7 +) + +// Enum value maps for CalendarPeriod. 
+var ( + CalendarPeriod_name = map[int32]string{ + 0: "CALENDAR_PERIOD_UNSPECIFIED", + 1: "DAY", + 2: "WEEK", + 3: "FORTNIGHT", + 4: "MONTH", + 5: "QUARTER", + 6: "HALF", + 7: "YEAR", + } + CalendarPeriod_value = map[string]int32{ + "CALENDAR_PERIOD_UNSPECIFIED": 0, + "DAY": 1, + "WEEK": 2, + "FORTNIGHT": 3, + "MONTH": 4, + "QUARTER": 5, + "HALF": 6, + "YEAR": 7, + } +) + +func (x CalendarPeriod) Enum() *CalendarPeriod { + p := new(CalendarPeriod) + *p = x + return p +} + +func (x CalendarPeriod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CalendarPeriod) Descriptor() protoreflect.EnumDescriptor { + return file_google_type_calendar_period_proto_enumTypes[0].Descriptor() +} + +func (CalendarPeriod) Type() protoreflect.EnumType { + return &file_google_type_calendar_period_proto_enumTypes[0] +} + +func (x CalendarPeriod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CalendarPeriod.Descriptor instead. +func (CalendarPeriod) EnumDescriptor() ([]byte, []int) { + return file_google_type_calendar_period_proto_rawDescGZIP(), []int{0} +} + +var File_google_type_calendar_period_proto protoreflect.FileDescriptor + +var file_google_type_calendar_period_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, + 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2a, 0x7f, 0x0a, 0x0e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x41, 0x4c, 0x45, 0x4e, 0x44, 0x41, 0x52, 0x5f, 0x50, + 0x45, 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x57, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x54, 0x4e, 0x49, + 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x04, + 0x12, 0x0b, 0x0a, 0x07, 0x51, 0x55, 0x41, 0x52, 0x54, 0x45, 0x52, 0x10, 0x05, 0x12, 0x08, 0x0a, + 0x04, 0x48, 0x41, 0x4c, 0x46, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, + 0x07, 0x42, 0x78, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x13, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x3b, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_calendar_period_proto_rawDescOnce sync.Once + file_google_type_calendar_period_proto_rawDescData = file_google_type_calendar_period_proto_rawDesc +) + +func file_google_type_calendar_period_proto_rawDescGZIP() []byte { + file_google_type_calendar_period_proto_rawDescOnce.Do(func() { + file_google_type_calendar_period_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_type_calendar_period_proto_rawDescData) + }) + return file_google_type_calendar_period_proto_rawDescData +} + +var file_google_type_calendar_period_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_type_calendar_period_proto_goTypes = []interface{}{ + (CalendarPeriod)(0), // 0: google.type.CalendarPeriod +} +var file_google_type_calendar_period_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_calendar_period_proto_init() } +func file_google_type_calendar_period_proto_init() { + if File_google_type_calendar_period_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_calendar_period_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_calendar_period_proto_goTypes, + DependencyIndexes: file_google_type_calendar_period_proto_depIdxs, + EnumInfos: file_google_type_calendar_period_proto_enumTypes, + }.Build() + File_google_type_calendar_period_proto = out.File + file_google_type_calendar_period_proto_rawDesc = nil + file_google_type_calendar_period_proto_goTypes = nil + file_google_type_calendar_period_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go new file mode 100644 index 000000000..7ea79410a --- /dev/null +++ b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package audit contains interfaces for audit logging during authorization. +package audit + +import ( + "encoding/json" + "sync" +) + +// loggerBuilderRegistry holds a map of audit logger builders and a mutex +// to facilitate thread-safe reading/writing operations. +type loggerBuilderRegistry struct { + mu sync.Mutex + builders map[string]LoggerBuilder +} + +var ( + registry = loggerBuilderRegistry{ + builders: make(map[string]LoggerBuilder), + } +) + +// RegisterLoggerBuilder registers the builder in a global map +// using b.Name() as the key. +// +// This should only be called during initialization time (i.e. in an init() +// function). If multiple builders are registered with the same name, +// the one registered last will take effect. +func RegisterLoggerBuilder(b LoggerBuilder) { + registry.mu.Lock() + defer registry.mu.Unlock() + registry.builders[b.Name()] = b +} + +// GetLoggerBuilder returns a builder with the given name. +// It returns nil if the builder is not found in the registry. 
+func GetLoggerBuilder(name string) LoggerBuilder { + registry.mu.Lock() + defer registry.mu.Unlock() + return registry.builders[name] +} + +// Event contains information passed to the audit logger as part of an +// audit logging event. +type Event struct { + // FullMethodName is the full method name of the audited RPC, in the format + // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". + FullMethodName string + // Principal is the identity of the caller. Currently it will only be + // available in certificate-based TLS authentication. + Principal string + // PolicyName is the authorization policy name or the xDS RBAC filter name. + PolicyName string + // MatchedRule is the matched rule or policy name in the xDS RBAC filter. + // It will be empty if there is no match. + MatchedRule string + // Authorized indicates whether the audited RPC is authorized or not. + Authorized bool +} + +// LoggerConfig represents an opaque data structure holding an audit +// logger configuration. Concrete types representing configuration of specific +// audit loggers must embed this interface to implement it. +type LoggerConfig interface { + loggerConfig() +} + +// Logger is the interface to be implemented by audit loggers. +// +// An audit logger is a logger instance that can be configured via the +// authorization policy API or xDS HTTP RBAC filters. When the authorization +// decision meets the condition for audit, all the configured audit loggers' +// Log() method will be invoked to log that event. +// +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. +type Logger interface { + // Log performs audit logging for the provided audit event. + // + // This method is invoked in the RPC path and therefore implementations + // must not block. + Log(*Event) +} + +// LoggerBuilder is the interface to be implemented by audit logger +// builders that are used at runtime to configure and instantiate audit loggers. +// +// Users who want to implement their own audit logging logic should +// implement this interface, along with the Logger interface, and register +// it by calling RegisterLoggerBuilder() at init time. +// +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. +type LoggerBuilder interface { + // ParseLoggerConfig parses the given JSON bytes into a structured + // logger config this builder can use to build an audit logger. + ParseLoggerConfig(config json.RawMessage) (LoggerConfig, error) + // Build builds an audit logger with the given logger config. + // This will only be called with valid configs returned from + // ParseLoggerConfig() and any runtime issues such as failing to + // create a file should be handled by the logger implementation instead of + // failing the logger instantiation. So implementers need to make sure it + // can return a logger without error at this stage. + Build(LoggerConfig) Logger + // Name returns the name of logger built by this builder. + // This is used to register and pick the builder. + Name() string +} diff --git a/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go new file mode 100644 index 000000000..f9bfc2d7d --- /dev/null +++ b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stdout defines an stdout audit logger. +package stdout + +import ( + "encoding/json" + "log" + "os" + "time" + + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/grpclog" +) + +var grpcLogger = grpclog.Component("authz-audit") + +// Name is the string to identify this logger type in the registry +const Name = "stdout_logger" + +func init() { + audit.RegisterLoggerBuilder(&loggerBuilder{ + goLogger: log.New(os.Stdout, "", 0), + }) +} + +type event struct { + FullMethodName string `json:"rpc_method"` + Principal string `json:"principal"` + PolicyName string `json:"policy_name"` + MatchedRule string `json:"matched_rule"` + Authorized bool `json:"authorized"` + Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method +} + +// logger implements the audit.logger interface by logging to standard output. +type logger struct { + goLogger *log.Logger +} + +// Log marshals the audit.Event to json and prints it to standard output. +func (l *logger) Log(event *audit.Event) { + jsonContainer := map[string]any{ + "grpc_audit_log": convertEvent(event), + } + jsonBytes, err := json.Marshal(jsonContainer) + if err != nil { + grpcLogger.Errorf("failed to marshal AuditEvent data to JSON: %v", err) + return + } + l.goLogger.Println(string(jsonBytes)) +} + +// loggerConfig represents the configuration for the stdout logger. +// It is currently empty and implements the audit.Logger interface by embedding it. +type loggerConfig struct { + audit.LoggerConfig +} + +type loggerBuilder struct { + goLogger *log.Logger +} + +func (loggerBuilder) Name() string { + return Name +} + +// Build returns a new instance of the stdout logger. +// Passed in configuration is ignored as the stdout logger does not +// expect any configuration to be provided. +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &logger{ + goLogger: lb.goLogger, + } +} + +// ParseLoggerConfig is a no-op since the stdout logger does not accept any configuration. +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + if len(config) != 0 && string(config) != "{}" { + grpcLogger.Warningf("Stdout logger doesn't support custom configs. Ignoring:\n%s", string(config)) + } + return &loggerConfig{}, nil +} + +func convertEvent(auditEvent *audit.Event) *event { + return &event{ + FullMethodName: auditEvent.FullMethodName, + Principal: auditEvent.Principal, + PolicyName: auditEvent.PolicyName, + MatchedRule: auditEvent.MatchedRule, + Authorized: auditEvent.Authorized, + Timestamp: time.Now().Format(time.RFC3339Nano), + } +} diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go new file mode 100644 index 000000000..ddd9bd269 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package leastrequest implements a least request load balancer. +package leastrequest + +import ( + "encoding/json" + "fmt" + "math/rand" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/serviceconfig" +) + +// randuint32 is a global to stub out in tests. +var randuint32 = rand.Uint32 + +// Name is the name of the least request balancer. +const Name = "least_request_experimental" + +var logger = grpclog.Component("least-request") + +func init() { + balancer.Register(bb{}) +} + +// LBConfig is the balancer config for least_request_experimental balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // ChoiceCount is the number of random SubConns to sample to find the one + // with the fewest outstanding requests. If unset, defaults to 2. If set to + // < 2, the config will be rejected, and if set to > 10, will become 10. + ChoiceCount uint32 `json:"choiceCount,omitempty"` +} + +type bb struct{} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbConfig := &LBConfig{ + ChoiceCount: 2, + } + if err := json.Unmarshal(s, lbConfig); err != nil { + return nil, fmt.Errorf("least-request: unable to unmarshal LBConfig: %v", err) + } + // "If `choice_count < 2`, the config will be rejected." - A48 + if lbConfig.ChoiceCount < 2 { // sweet + return nil, fmt.Errorf("least-request: lbConfig.choiceCount: %v, must be >= 2", lbConfig.ChoiceCount) + } + // "If a LeastRequestLoadBalancingConfig with a choice_count > 10 is + // received, the least_request_experimental policy will set choice_count = + // 10." - A48 + if lbConfig.ChoiceCount > 10 { + lbConfig.ChoiceCount = 10 + } + return lbConfig, nil +} + +func (bb) Name() string { + return Name +} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &leastRequestBalancer{scRPCCounts: make(map[balancer.SubConn]*atomic.Int32)} + baseBuilder := base.NewBalancerBuilder(Name, b, base.Config{HealthCheck: true}) + b.Balancer = baseBuilder.Build(cc, bOpts) + return b +} + +type leastRequestBalancer struct { + // Embeds balancer.Balancer because needs to intercept UpdateClientConnState + // to learn about choiceCount. + balancer.Balancer + + choiceCount uint32 + scRPCCounts map[balancer.SubConn]*atomic.Int32 // Hold onto RPC counts to keep track for subsequent picker updates. 
+} + +func (lrb *leastRequestBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lrCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + logger.Errorf("least-request: received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + lrb.choiceCount = lrCfg.ChoiceCount + return lrb.Balancer.UpdateClientConnState(s) +} + +type scWithRPCCount struct { + sc balancer.SubConn + numRPCs *atomic.Int32 +} + +func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { + if logger.V(2) { + logger.Infof("least-request: Build called with info: %v", info) + } + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + + for sc := range lrb.scRPCCounts { + if _, ok := info.ReadySCs[sc]; !ok { // If no longer ready, no more need for the ref to count active RPCs. + delete(lrb.scRPCCounts, sc) + } + } + + // Create new refs if needed. + for sc := range info.ReadySCs { + if _, ok := lrb.scRPCCounts[sc]; !ok { + lrb.scRPCCounts[sc] = new(atomic.Int32) + } + } + + // Copy refs to counters into picker. + scs := make([]scWithRPCCount, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, scWithRPCCount{ + sc: sc, + numRPCs: lrb.scRPCCounts[sc], // guaranteed to be present due to algorithm + }) + } + + return &picker{ + choiceCount: lrb.choiceCount, + subConns: scs, + } +} + +type picker struct { + // choiceCount is the number of random SubConns to find the one with + // the least request. + choiceCount uint32 + // Built out when receives list of ready RPCs. + subConns []scWithRPCCount +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + var pickedSC *scWithRPCCount + var pickedSCNumRPCs int32 + for i := 0; i < int(p.choiceCount); i++ { + index := randuint32() % uint32(len(p.subConns)) + sc := p.subConns[index] + n := sc.numRPCs.Load() + if pickedSC == nil || n < pickedSCNumRPCs { + pickedSC = &sc + pickedSCNumRPCs = n + } + } + // "The counter for a subchannel should be atomically incremented by one + // after it has been successfully picked by the picker." - A48 + pickedSC.numRPCs.Add(1) + // "the picker should add a callback for atomically decrementing the + // subchannel counter once the RPC finishes (regardless of Status code)." - + // A48. + done := func(balancer.DoneInfo) { + pickedSC.numRPCs.Add(-1) + } + return balancer.PickResult{ + SubConn: pickedSC.sc, + Done: done, + }, nil +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/vendor/google.golang.org/grpc/balancer/rls/balancer.go new file mode 100644 index 000000000..5ae4d2e13 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -0,0 +1,705 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS LB policy. 
+package rls + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" +) + +const ( + // Name is the name of the RLS LB policy. + // + // It currently has an experimental suffix which would be removed once + // end-to-end testing of the policy is completed. + Name = internal.RLSLoadBalancingPolicyName + // Default frequency for data cache purging. + periodicCachePurgeFreq = time.Minute +) + +var ( + logger = grpclog.Component("rls") + errBalancerClosed = errors.New("rls LB policy is closed") + + // Below defined vars for overriding in unit tests. + + // Default exponential backoff strategy for data cache entries. + defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) + // Ticker used for periodic data cache purging. + dataCachePurgeTicker = func() *time.Ticker { return time.NewTicker(periodicCachePurgeFreq) } + // We want every cache entry to live in the cache for at least this + // duration. If we encounter a cache entry whose minimum expiration time is + // in the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases where + // the cache is too small, when we receive an RLS Response, we keep the + // resulting cache entry around long enough for the pending incoming + // requests to be re-processed through the new Picker. If we didn't do this, + // then we'd risk throwing away each RLS response as we receive it, in which + // case we would fail to actually route any of our incoming requests. + minEvictDuration = 5 * time.Second + + // Following functions are no-ops in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. + clientConnUpdateHook = func() {} + dataCachePurgeHook = func() {} + resetBackoffHook = func() {} + + cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.default_target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to the default target.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. 
Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.failed_picks", + Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target"}, + Default: false, + }) +) + +func init() { + balancer.Register(&rlsBB{}) +} + +type rlsBB struct{} + +func (rlsBB) Name() string { + return Name +} + +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + lb := &rlsBalancer{ + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + dataCachePurgeHook: dataCachePurgeHook, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) + lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String()) + lb.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: opts, + StateAggregator: lb, + Logger: lb.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + lb.bg.Start() + go lb.run() + return lb +} + +// rlsBalancer implements the RLS LB policy. +type rlsBalancer struct { + closed *grpcsync.Event // Fires when Close() is invoked. Guarded by stateMu. + done *grpcsync.Event // Fires when Close() is done. + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + dataCachePurgeHook func() + logger *internalgrpclog.PrefixLogger + + // If both cacheMu and stateMu need to be acquired, the former must be + // acquired first to prevent a deadlock. This order restriction is due to the + // fact that in places where we need to acquire both the locks, we always + // start off reading the cache. + + // cacheMu guards access to the data cache and pending requests map. We + // cannot use an RWMutex here since even an operation like + // dataCache.getEntry() modifies the underlying LRU, which is implemented as + // a doubly linked list. + cacheMu sync.Mutex + dataCache *dataCache // Cache of RLS data. + pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. + + // stateMu guards access to all LB policy state. + stateMu sync.Mutex + lbCfg *lbConfig // Most recently received service config. + childPolicyBuilder balancer.Builder // Cached child policy builder. + resolverState resolver.State // Cached resolver state. + ctrlCh *controlChannel // Control channel to the RLS server. + bg *balancergroup.BalancerGroup + childPolicies map[string]*childPolicyWrapper + defaultPolicy *childPolicyWrapper + // A reference to the most recent picker sent to gRPC as part of a state + // update is cached in this field so that we can release the reference to the + // default child policy wrapper when a new picker is created. See + // sendNewPickerLocked() for details. + lastPicker *rlsPicker + // Set during UpdateClientConnState when pushing updates to child policies. 
+ // Prevents state updates from child policies causing new pickers to be sent + // up the channel. Cleared after all child policies have processed the + // updates sent to them, after which a new picker is sent up the channel. + inhibitPickerUpdates bool + + // Channel on which all updates are pushed. Processed in run(). + updateCh *buffer.Unbounded +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// childPolicyIDAndState wraps a child policy id and its state update. +type childPolicyIDAndState struct { + id string + state balancer.State +} + +type controlChannelReady struct{} + +// run is a long-running goroutine which handles all the updates that the +// balancer wishes to handle. The appropriate updateHandler will push the update +// on to a channel that this goroutine will select on, thereby the handling of +// the update will happen asynchronously. +func (b *rlsBalancer) run() { + // We exit out of the for loop below only after `Close()` has been invoked. + // Firing the done event here will ensure that Close() returns only after + // all goroutines are done. + defer func() { b.done.Fire() }() + + // Wait for purgeDataCache() goroutine to exit before returning from here. + doneCh := make(chan struct{}) + defer func() { + <-doneCh + }() + go b.purgeDataCache(doneCh) + + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case childPolicyIDAndState: + b.handleChildPolicyStateUpdate(update.id, update.state) + case controlChannelReady: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case resumePickerUpdates: + b.stateMu.Lock() + b.logger.Infof("Resuming picker updates after config propagation to child policies") + b.inhibitPickerUpdates = false + b.sendNewPickerLocked() + close(update.done) + b.stateMu.Unlock() + default: + b.logger.Errorf("Unsupported update type %T", update) + } + case <-b.closed.Done(): + return + } + } +} + +// purgeDataCache is a long-running goroutine which periodically deletes expired +// entries. An expired entry is one for which both the expiryTime and +// backoffExpiryTime are in the past. +func (b *rlsBalancer) purgeDataCache(doneCh chan struct{}) { + defer close(doneCh) + + for { + select { + case <-b.closed.Done(): + return + case <-b.purgeTicker.C: + b.cacheMu.Lock() + updatePicker := b.dataCache.evictExpiredEntries() + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + b.dataCachePurgeHook() + } + } +} + +func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + defer clientConnUpdateHook() + + b.stateMu.Lock() + if b.closed.HasFired() { + b.stateMu.Unlock() + b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) + return errBalancerClosed + } + + newCfg := ccs.BalancerConfig.(*lbConfig) + if b.lbCfg.Equal(newCfg) { + b.stateMu.Unlock() + b.logger.Infof("New service config matches existing config") + return nil + } + + b.logger.Infof("Delaying picker updates until config is propagated to and processed by child policies") + b.inhibitPickerUpdates = true + + // When the RLS server name changes, the old control channel needs to be + // swapped out for a new one. 
All state associated with the throttling + // algorithm is stored on a per-control-channel basis; when we swap out + // channels, we also swap out the throttling state. + b.handleControlChannelUpdate(newCfg) + + // Any changes to child policy name or configuration needs to be handled by + // either creating new child policies or pushing updates to existing ones. + b.resolverState = ccs.ResolverState + b.handleChildPolicyConfigUpdate(newCfg, &ccs) + + // Resize the cache if the size in the config has changed. + resizeCache := newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes + + // Update the copy of the config in the LB policy before releasing the lock. + b.lbCfg = newCfg + b.stateMu.Unlock() + + // We cannot do cache operations above because `cacheMu` needs to be grabbed + // before `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.updateRLSServerTarget(newCfg.lookupService) + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. If we do evict an entry with valid backoff timer, + // the new picker needs to be sent to the channel to re-process any + // RPCs queued as a result of this backoff timer. + b.dataCache.resize(newCfg.cacheSizeBytes) + } + b.cacheMu.Unlock() + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. + done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + <-done + return nil +} + +// handleControlChannelUpdate handles updates to service config fields which +// influence the control channel to the RLS server. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { + if newCfg.lookupService == b.lbCfg.lookupService && newCfg.lookupServiceTimeout == b.lbCfg.lookupServiceTimeout { + return + } + + // Create a new control channel and close the existing one. + b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) + backToReadyFn := func() { + b.updateCh.Put(controlChannelReady{}) + } + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, backToReadyFn) + if err != nil { + // This is very uncommon and usually represents a non-transient error. + // There is not much we can do here other than wait for another update + // which might fix things. + b.logger.Errorf("Failed to create control channel to %q: %v", newCfg.lookupService, err) + return + } + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.ctrlCh = ctrlCh +} + +// handleChildPolicyConfigUpdate handles updates to service config fields which +// influence child policy configuration. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleChildPolicyConfigUpdate(newCfg *lbConfig, ccs *balancer.ClientConnState) { + // Update child policy builder first since other steps are dependent on this. + if b.childPolicyBuilder == nil || b.childPolicyBuilder.Name() != newCfg.childPolicyName { + b.logger.Infof("Child policy changed to %q", newCfg.childPolicyName) + b.childPolicyBuilder = balancer.Get(newCfg.childPolicyName) + for _, cpw := range b.childPolicies { + // If the child policy has changed, we need to remove the old policy + // from the BalancerGroup and add a new one. 
The BalancerGroup takes + // care of closing the old one in this case. + b.bg.Remove(cpw.target) + b.bg.Add(cpw.target, b.childPolicyBuilder) + } + } + + configSentToDefault := false + if b.lbCfg.defaultTarget != newCfg.defaultTarget { + // If the default target has changed, create a new childPolicyWrapper for + // the new target if required. If a new wrapper is created, add it to the + // childPolicies map and the BalancerGroup. + b.logger.Infof("Default target in LB config changing from %q to %q", b.lbCfg.defaultTarget, newCfg.defaultTarget) + cpw := b.childPolicies[newCfg.defaultTarget] + if cpw == nil { + cpw = newChildPolicyWrapper(newCfg.defaultTarget) + b.childPolicies[newCfg.defaultTarget] = cpw + b.bg.Add(newCfg.defaultTarget, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", newCfg.defaultTarget) + } + if err := b.buildAndPushChildPolicyConfigs(newCfg.defaultTarget, newCfg, ccs); err != nil { + cpw.lamify(err) + } + + // If an old default exists, release its reference. If this was the last + // reference, remove the child policy from the BalancerGroup and remove the + // corresponding entry the childPolicies map. + if b.defaultPolicy != nil { + if b.defaultPolicy.releaseRef() { + delete(b.childPolicies, b.lbCfg.defaultTarget) + b.bg.Remove(b.defaultPolicy.target) + } + } + b.defaultPolicy = cpw + configSentToDefault = true + } + + // No change in configuration affecting child policies. Return early. + if b.lbCfg.childPolicyName == newCfg.childPolicyName && b.lbCfg.childPolicyTargetField == newCfg.childPolicyTargetField && childPolicyConfigEqual(b.lbCfg.childPolicyConfig, newCfg.childPolicyConfig) { + return + } + + // If fields affecting child policy configuration have changed, the changes + // are pushed to the childPolicyWrapper which handles them appropriately. + for _, cpw := range b.childPolicies { + if configSentToDefault && cpw.target == newCfg.defaultTarget { + // Default target has already been taken care of. + continue + } + if err := b.buildAndPushChildPolicyConfigs(cpw.target, newCfg, ccs); err != nil { + cpw.lamify(err) + } + } +} + +// buildAndPushChildPolicyConfigs builds the final child policy configuration by +// adding the `targetField` to the base child policy configuration received in +// RLS LB policy configuration. The `targetField` is set to target and +// configuration is pushed to the child policy through the BalancerGroup. +// +// Caller must hold lb.stateMu. 
+func (b *rlsBalancer) buildAndPushChildPolicyConfigs(target string, newCfg *lbConfig, ccs *balancer.ClientConnState) error { + jsonTarget, err := json.Marshal(target) + if err != nil { + return fmt.Errorf("failed to marshal child policy target %q: %v", target, err) + } + + config := newCfg.childPolicyConfig + targetField := newCfg.childPolicyTargetField + config[targetField] = jsonTarget + jsonCfg, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal child policy config %+v: %v", config, err) + } + + parser, _ := b.childPolicyBuilder.(balancer.ConfigParser) + parsedCfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("childPolicy config parsing failed: %v", err) + } + + state := balancer.ClientConnState{ResolverState: ccs.ResolverState, BalancerConfig: parsedCfg} + b.logger.Infof("Pushing new state to child policy %q: %+v", target, state) + if err := b.bg.UpdateClientConnState(target, state); err != nil { + b.logger.Warningf("UpdateClientConnState(%q, %+v) failed : %v", target, ccs, err) + } + return nil +} + +func (b *rlsBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *rlsBalancer) Close() { + b.stateMu.Lock() + b.closed.Fire() + b.purgeTicker.Stop() + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.bg.Close() + b.stateMu.Unlock() + + b.cacheMu.Lock() + b.dataCache.stop() + b.cacheMu.Unlock() + + b.updateCh.Close() + + <-b.done.Done() +} + +func (b *rlsBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// sendNewPickerLocked pushes a new picker on to the channel. +// +// Note that regardless of what connectivity state is reported, the policy will +// return its own picker, and not a picker that unconditionally queues +// (typically used for IDLE or CONNECTING) or a picker that unconditionally +// fails (typically used for TRANSIENT_FAILURE). This is required because, +// irrespective of the connectivity state, we need to able to perform RLS +// lookups for incoming RPCs and affect the status of queued RPCs based on the +// receipt of RLS responses. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) sendNewPickerLocked() { + aggregatedState := b.aggregatedConnectivityState() + + // Acquire a separate reference for the picker. This is required to ensure + // that the wrapper held by the old picker is not closed when the default + // target changes in the config, and a new wrapper is created for the new + // default target. See handleChildPolicyConfigUpdate() for how config changes + // affecting the default target are handled. 
+ if b.defaultPolicy != nil { + b.defaultPolicy.acquireRef() + } + + picker := &rlsPicker{ + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + rlsServerTarget: b.lbCfg.lookupService, + grpcTarget: b.bopts.Target.String(), + metricsRecorder: b.bopts.MetricsRecorder, + } + picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) + state := balancer.State{ + ConnectivityState: aggregatedState, + Picker: picker, + } + + if !b.inhibitPickerUpdates { + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + } else { + b.logger.Infof("Delaying picker update: %+v", state) + } + + if b.lastPicker != nil { + if b.defaultPolicy != nil { + b.defaultPolicy.releaseRef() + } + } + b.lastPicker = picker +} + +func (b *rlsBalancer) sendNewPicker() { + b.stateMu.Lock() + defer b.stateMu.Unlock() + if b.closed.HasFired() { + return + } + b.sendNewPickerLocked() +} + +// The aggregated connectivity state reported is determined as follows: +// - If there is at least one child policy in state READY, the connectivity +// state is READY. +// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. +// - Otherwise, if there is at least one child policy in state IDLE, the +// connectivity state is IDLE. +// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the +// connectivity state is TRANSIENT_FAILURE. +// +// If the RLS policy has no child policies and no configured default target, +// then we will report connectivity state IDLE. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State { + if len(b.childPolicies) == 0 && b.lbCfg.defaultTarget == "" { + return connectivity.Idle + } + + var readyN, connectingN, idleN int + for _, cpw := range b.childPolicies { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + switch state.ConnectivityState { + case connectivity.Ready: + readyN++ + case connectivity.Connecting: + connectingN++ + case connectivity.Idle: + idleN++ + } + } + + switch { + case readyN > 0: + return connectivity.Ready + case connectingN > 0: + return connectivity.Connecting + case idleN > 0: + return connectivity.Idle + default: + return connectivity.TransientFailure + } +} + +// UpdateState is a implementation of the balancergroup.BalancerStateAggregator +// interface. The actual state aggregation functionality is handled +// asynchronously. This method only pushes the state update on to channel read +// and dispatched by the run() goroutine. +func (b *rlsBalancer) UpdateState(id string, state balancer.State) { + b.updateCh.Put(childPolicyIDAndState{id: id, state: state}) +} + +// handleChildPolicyStateUpdate provides the state aggregator functionality for +// the BalancerGroup. +// +// This method is invoked by the BalancerGroup whenever a child policy sends a +// state update. We cache the child policy's connectivity state and picker for +// two reasons: +// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states +// other than READY +// - to delegate picks to child policies +func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) { + b.stateMu.Lock() + defer b.stateMu.Unlock() + + cpw := b.childPolicies[id] + if cpw == nil { + // All child policies start with an entry in the map. 
If ID is not in + // map, it's either been removed, or never existed. + b.logger.Warningf("Received state update %+v for missing child policy %q", newState, id) + return + } + + oldState := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + if oldState.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting { + // Ignore state transitions from TRANSIENT_FAILURE to CONNECTING, and thus + // fail pending RPCs instead of queuing them indefinitely when all + // subChannels are failing, even if the subChannels are bouncing back and + // forth between CONNECTING and TRANSIENT_FAILURE. + return + } + atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState)) + b.logger.Infof("Child policy %q has new state %+v", id, newState) + b.sendNewPickerLocked() +} + +// acquireChildPolicyReferences attempts to acquire references to +// childPolicyWrappers corresponding to the passed in targets. If there is no +// childPolicyWrapper corresponding to one of the targets, a new one is created +// and added to the BalancerGroup. +func (b *rlsBalancer) acquireChildPolicyReferences(targets []string) []*childPolicyWrapper { + b.stateMu.Lock() + var newChildPolicies []*childPolicyWrapper + for _, target := range targets { + // If the target exists in the LB policy's childPolicies map. a new + // reference is taken here and added to the new list. + if cpw := b.childPolicies[target]; cpw != nil { + cpw.acquireRef() + newChildPolicies = append(newChildPolicies, cpw) + continue + } + + // If the target does not exist in the child policy map, then a new + // child policy wrapper is created and added to the new list. + cpw := newChildPolicyWrapper(target) + b.childPolicies[target] = cpw + b.bg.Add(target, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", target) + newChildPolicies = append(newChildPolicies, cpw) + if err := b.buildAndPushChildPolicyConfigs(target, b.lbCfg, &balancer.ClientConnState{ + ResolverState: b.resolverState, + }); err != nil { + cpw.lamify(err) + } + } + b.stateMu.Unlock() + return newChildPolicies +} + +// releaseChildPolicyReferences releases references to childPolicyWrappers +// corresponding to the passed in targets. If the release reference was the last +// one, the child policy is removed from the BalancerGroup. +func (b *rlsBalancer) releaseChildPolicyReferences(targets []string) { + b.stateMu.Lock() + for _, target := range targets { + if cpw := b.childPolicies[target]; cpw.releaseRef() { + delete(b.childPolicies, cpw.target) + b.bg.Remove(cpw.target) + } + } + b.stateMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/cache.go b/vendor/google.golang.org/grpc/balancer/rls/cache.go new file mode 100644 index 000000000..7fe796c95 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -0,0 +1,383 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "container/list" + "time" + + "github.com/google/uuid" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" +) + +// cacheKey represents the key used to uniquely identify an entry in the data +// cache and in the pending requests map. +type cacheKey struct { + // path is the full path of the incoming RPC request. + path string + // keys is a stringified version of the RLS request key map built using the + // RLS keyBuilder. Since maps are not a type which is comparable in Go, it + // cannot be part of the key for another map (entries in the data cache and + // pending requests map are stored in maps). + keys string +} + +// cacheEntry wraps all the data to be stored in a data cache entry. +type cacheEntry struct { + // childPolicyWrappers contains the list of child policy wrappers + // corresponding to the targets returned by the RLS server for this entry. + childPolicyWrappers []*childPolicyWrapper + // headerData is received in the RLS response and is to be sent in the + // X-Google-RLS-Data header for matching RPCs. + headerData string + // expiryTime is the absolute time at which this cache entry stops + // being valid. When an RLS request succeeds, this is set to the current + // time plus the max_age field from the LB policy config. + expiryTime time.Time + // staleTime is the absolute time after which this cache entry will be + // proactively refreshed if an incoming RPC matches this entry. When an RLS + // request succeeds, this is set to the current time plus the stale_age from + // the LB policy config. + staleTime time.Time + // earliestEvictTime is the absolute time before which this entry should not + // be evicted from the cache. When a cache entry is created, this is set to + // the current time plus a default value of 5 seconds. This is required to + // make sure that a new entry added to the cache is not evicted before the + // RLS response arrives (usually when the cache is too small). + earliestEvictTime time.Time + + // status stores the RPC status of the previous RLS request for this + // entry. Picks for entries with a non-nil value for this field are failed + // with the error stored here. + status error + // backoffState contains all backoff related state. When an RLS request + // succeeds, backoffState is reset. This state moves between the data cache + // and the pending requests map. + backoffState *backoffState + // backoffTime is the absolute time at which the backoff period for this + // entry ends. When an RLS request fails, this is set to the current time + // plus the backoff value returned by the backoffState. The backoff timer is + // also setup with this value. No new RLS requests are sent out for this + // entry until the backoff period ends. + // + // Set to zero time instant upon a successful RLS response. + backoffTime time.Time + // backoffExpiryTime is the absolute time at which an entry which has gone + // through backoff stops being valid. When an RLS request fails, this is + // set to the current time plus twice the backoff time. The cache expiry + // timer will only delete entries for which both expiryTime and + // backoffExpiryTime are in the past. + // + // Set to zero time instant upon a successful RLS response. + backoffExpiryTime time.Time + + // size stores the size of this cache entry. 
Used to enforce the cache size + // specified in the LB policy configuration. + size int64 +} + +// backoffState wraps all backoff related state associated with a cache entry. +type backoffState struct { + // retries keeps track of the number of RLS failures, to be able to + // determine the amount of time to backoff before the next attempt. + retries int + // bs is the exponential backoff implementation which returns the amount of + // time to backoff, given the number of retries. + bs backoff.Strategy + // timer fires when the backoff period ends and incoming requests after this + // will trigger a new RLS request. + timer *time.Timer +} + +// lru is a cache implementation with a least recently used eviction policy. +// Internally it uses a doubly linked list, with the least recently used element +// at the front of the list and the most recently used element at the back of +// the list. The value stored in this cache will be of type `cacheKey`. +// +// It is not safe for concurrent access. +type lru struct { + ll *list.List + + // A map from the value stored in the lru to its underlying list element is + // maintained to have a clean API. Without this, a subset of the lru's API + // would accept/return cacheKey while another subset would accept/return + // list elements. + m map[cacheKey]*list.Element +} + +// newLRU creates a new cache with a least recently used eviction policy. +func newLRU() *lru { + return &lru{ + ll: list.New(), + m: make(map[cacheKey]*list.Element), + } +} + +func (l *lru) addEntry(key cacheKey) { + e := l.ll.PushBack(key) + l.m[key] = e +} + +func (l *lru) makeRecent(key cacheKey) { + e := l.m[key] + l.ll.MoveToBack(e) +} + +func (l *lru) removeEntry(key cacheKey) { + e := l.m[key] + l.ll.Remove(e) + delete(l.m, key) +} + +func (l *lru) getLeastRecentlyUsed() cacheKey { + e := l.ll.Front() + if e == nil { + return cacheKey{} + } + return e.Value.(cacheKey) +} + +// dataCache contains a cache of RLS data used by the LB policy to make routing +// decisions. +// +// The dataCache will be keyed by the request's path and keys, represented by +// the `cacheKey` type. It will maintain the cache keys in an `lru` and the +// cache data, represented by the `cacheEntry` type, in a native map. +// +// It is not safe for concurrent access. +type dataCache struct { + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event + rlsServerTarget string + + // Read only after initialization. + grpcTarget string + uuid string + metricsRecorder estats.MetricsRecorder +} + +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache { + return &dataCache{ + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + grpcTarget: grpcTarget, + uuid: uuid.New().String(), + metricsRecorder: metricsRecorder, + } +} + +// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is +// configured with. +func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) { + dc.rlsServerTarget = rlsServerTarget +} + +// resize changes the maximum allowed size of the data cache. +// +// The return value indicates if an entry with a valid backoff timer was +// evicted. 
This is important to the RLS LB policy which would send a new picker +// on the channel to re-process any RPCs queued as a result of this backoff +// timer. +func (dc *dataCache) resize(size int64) (backoffCancelled bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffCancelled = false + for dc.currentSize > size { + key := dc.keys.getLeastRecentlyUsed() + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to resize it", key) + break + } + + // When we encounter a cache entry whose minimum expiration time is in + // the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases + // where the cache is too small, when we receive an RLS Response, we + // keep the resulting cache entry around long enough for the pending + // incoming requests to be re-processed through the new Picker. If we + // didn't do this, then we'd risk throwing away each RLS response as we + // receive it, in which case we would fail to actually route any of our + // incoming requests. + if entry.earliestEvictTime.After(time.Now()) { + dc.logger.Warningf("cachekey %+v is too recent to be evicted. Stopping cache resizing for now", key) + break + } + + // Stop the backoff timer before evicting the entry. + if entry.backoffState != nil && entry.backoffState.timer != nil { + if entry.backoffState.timer.Stop() { + entry.backoffState.timer = nil + backoffCancelled = true + } + } + dc.deleteAndCleanup(key, entry) + } + dc.maxSize = size + return backoffCancelled +} + +// evictExpiredEntries sweeps through the cache and deletes expired entries. An +// expired entry is one for which both the `expiryTime` and `backoffExpiryTime` +// fields are in the past. +// +// The return value indicates if any expired entries were evicted. +// +// The LB policy invokes this method periodically to purge expired entries. +func (dc *dataCache) evictExpiredEntries() bool { + if dc.shutdown.HasFired() { + return false + } + + evicted := false + for key, entry := range dc.entries { + // Only evict entries for which both the data expiration time and + // backoff expiration time fields are in the past. + now := time.Now() + if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { + continue + } + dc.deleteAndCleanup(key, entry) + evicted = true + } + return evicted +} + +// resetBackoffState sweeps through the cache and for entries with a backoff +// state, the backoff timer is cancelled and the backoff state is reset. The +// return value indicates if any entries were mutated in this fashion. +// +// The LB policy invokes this method when the control channel moves from READY +// to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on +// the `controlChannel` type for more details. +func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) bool { + if dc.shutdown.HasFired() { + return false + } + + backoffReset := false + for _, entry := range dc.entries { + if entry.backoffState == nil { + continue + } + if entry.backoffState.timer != nil { + entry.backoffState.timer.Stop() + entry.backoffState.timer = nil + } + entry.backoffState = &backoffState{bs: newBackoffState.bs} + entry.backoffTime = time.Time{} + entry.backoffExpiryTime = time.Time{} + backoffReset = true + } + return backoffReset +} + +// addEntry adds a cache entry for the given key. 
+// +// Return value backoffCancelled indicates if a cache entry with a valid backoff +// timer was evicted to make space for the current entry. This is important to +// the RLS LB policy which would send a new picker on the channel to re-process +// any RPCs queued as a result of this backoff timer. +// +// Return value ok indicates if entry was successfully added to the cache. +func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled bool, ok bool) { + if dc.shutdown.HasFired() { + return false, false + } + + // Handle the extremely unlikely case that a single entry is bigger than the + // size of the cache. + if entry.size > dc.maxSize { + return false, false + } + dc.entries[key] = entry + dc.currentSize += entry.size + dc.keys.addEntry(key) + // If the new entry makes the cache go over its configured size, remove some + // old entries. + if dc.currentSize > dc.maxSize { + backoffCancelled = dc.resize(dc.maxSize) + } + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + return backoffCancelled, true +} + +// updateEntrySize updates the size of a cache entry and the current size of the +// data cache. An entry's size can change upon receipt of an RLS response. +func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { + dc.currentSize -= entry.size + entry.size = newSize + dc.currentSize += entry.size + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) +} + +func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { + if dc.shutdown.HasFired() { + return nil + } + + entry, ok := dc.entries[key] + if !ok { + return nil + } + dc.keys.makeRecent(key) + return entry +} + +func (dc *dataCache) removeEntryForTesting(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + return + } + dc.deleteAndCleanup(key, entry) +} + +// deleteAndCleanup performs actions required at the time of deleting an entry +// from the data cache. +// - the entry is removed from the map of entries +// - current size of the data cache is update +// - the key is removed from the LRU +func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { + delete(dc.entries, key) + dc.currentSize -= entry.size + dc.keys.removeEntry(key) + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) +} + +func (dc *dataCache) stop() { + for key, entry := range dc.entries { + dc.deleteAndCleanup(key, entry) + } + dc.shutdown.Fire() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/child_policy.go b/vendor/google.golang.org/grpc/balancer/rls/child_policy.go new file mode 100644 index 000000000..c74184cac --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/child_policy.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +// childPolicyWrapper is a reference counted wrapper around a child policy. +// +// The LB policy maintains a map of these wrappers keyed by the target returned +// by RLS. When a target is seen for the first time, a child policy wrapper is +// created for it and the wrapper is added to the child policy map. Each entry +// in the data cache holds references to the corresponding child policy +// wrappers. The LB policy also holds a reference to the child policy wrapper +// for the default target specified in the LB Policy Configuration +// +// When a cache entry is evicted, it releases references to the child policy +// wrappers that it contains. When all references have been released, the +// wrapper is removed from the child policy map and is destroyed. +// +// The child policy wrapper also caches the connectivity state and most recent +// picker from the child policy. Once the child policy wrapper reports +// TRANSIENT_FAILURE, it will continue reporting that state until it goes READY; +// transitions from TRANSIENT_FAILURE to CONNECTING are ignored. +// +// Whenever a child policy wrapper changes its connectivity state, the LB policy +// returns a new picker to the channel, since the channel may need to re-process +// the picks for queued RPCs. +// +// It is not safe for concurrent access. +type childPolicyWrapper struct { + logger *internalgrpclog.PrefixLogger + target string // RLS target corresponding to this child policy. + refCnt int // Reference count. + + // Balancer state reported by the child policy. The RLS LB policy maintains + // these child policies in a BalancerGroup. The state reported by the child + // policy is pushed to the state aggregator (which is also implemented by the + // RLS LB policy) and cached here. See handleChildPolicyStateUpdate() for + // details on how the state aggregation is performed. + // + // While this field is written to by the LB policy, it is read by the picker + // at Pick time. Making this an atomic to enable the picker to read this value + // without a mutex. + state unsafe.Pointer // *balancer.State +} + +// newChildPolicyWrapper creates a child policy wrapper for the given target, +// and is initialized with one reference and starts off in CONNECTING state. +func newChildPolicyWrapper(target string) *childPolicyWrapper { + c := &childPolicyWrapper{ + target: target, + refCnt: 1, + state: unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }), + } + c.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-child-policy-wrapper %s %p] ", c.target, c)) + c.logger.Infof("Created") + return c +} + +// acquireRef increments the reference count on the child policy wrapper. +func (c *childPolicyWrapper) acquireRef() { + c.refCnt++ +} + +// releaseRef decrements the reference count on the child policy wrapper. The +// return value indicates whether the released reference was the last one. +func (c *childPolicyWrapper) releaseRef() bool { + c.refCnt-- + return c.refCnt == 0 +} + +// lamify causes the child policy wrapper to return a picker which will always +// fail requests. 
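// Illustrative sketch only, not part of the vendored code: the lock-free state
// publication pattern used by childPolicyWrapper above, which stores a
// *balancer.State behind an unsafe.Pointer so the picker can read it with a
// single atomic load. On Go 1.19+ the same idea can be written with the
// generics-based atomic.Pointer; childState, publish and snapshot below are
// hypothetical names used just for this sketch.
package main

import (
	"fmt"
	"sync/atomic"
)

type childState struct {
	connectivity string // e.g. "CONNECTING", "READY", "TRANSIENT_FAILURE"
	lastErr      error
}

type wrapper struct {
	state atomic.Pointer[childState]
}

// publish is called by the balancer goroutine whenever the child reports a new
// state; readers never block on a mutex.
func (w *wrapper) publish(s *childState) { w.state.Store(s) }

// snapshot is what the RPC hot path (analogous to Pick) would call; it costs
// only one atomic load.
func (w *wrapper) snapshot() *childState { return w.state.Load() }

func main() {
	w := &wrapper{}
	w.publish(&childState{connectivity: "CONNECTING"})
	w.publish(&childState{connectivity: "READY"})
	fmt.Println(w.snapshot().connectivity) // READY
}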
This is used when the wrapper runs into errors when trying to +// build and parse the child policy configuration. +func (c *childPolicyWrapper) lamify(err error) { + c.logger.Warningf("Entering lame mode: %v", err) + atomic.StorePointer(&c.state, unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + })) +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/config.go b/vendor/google.golang.org/grpc/balancer/rls/config.go new file mode 100644 index 000000000..439581c78 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Default max_age if not specified (or greater than this value) in the + // service config. + maxMaxAge = 5 * time.Minute + // Upper limit for cache_size since we don't fully trust the service config. + maxCacheSize = 5 * 1024 * 1024 * 8 // 5MB in bytes + // Default lookup_service_timeout if not specified in the service config. + defaultLookupServiceTimeout = 10 * time.Second + // Default value for targetNameField in the child policy config during + // service config validation. + dummyChildPolicyTarget = "target_name_to_be_filled_in_later" +) + +// lbConfig is the internal representation of the RLS LB policy's config. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + cacheSizeBytes int64 // Keep this field 64-bit aligned. 
+ kbMap keys.BuilderMap + lookupService string + lookupServiceTimeout time.Duration + maxAge time.Duration + staleAge time.Duration + defaultTarget string + + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string + controlChannelServiceConfig string +} + +func (lbCfg *lbConfig) Equal(other *lbConfig) bool { + return lbCfg.kbMap.Equal(other.kbMap) && + lbCfg.lookupService == other.lookupService && + lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && + lbCfg.maxAge == other.maxAge && + lbCfg.staleAge == other.staleAge && + lbCfg.cacheSizeBytes == other.cacheSizeBytes && + lbCfg.defaultTarget == other.defaultTarget && + lbCfg.childPolicyName == other.childPolicyName && + lbCfg.childPolicyTargetField == other.childPolicyTargetField && + lbCfg.controlChannelServiceConfig == other.controlChannelServiceConfig && + childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) +} + +func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { + if (b == nil) != (a == nil) { + return false + } + if len(b) != len(a) { + return false + } + for k, jsonA := range a { + jsonB, ok := b[k] + if !ok { + return false + } + if !bytes.Equal(jsonA, jsonB) { + return false + } + } + return true +} + +// This struct resembles the JSON representation of the loadBalancing config +// and makes it easier to unmarshal. +type lbConfigJSON struct { + RouteLookupConfig json.RawMessage + RouteLookupChannelServiceConfig json.RawMessage + ChildPolicy []map[string]json.RawMessage + ChildPolicyConfigTargetFieldName string +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +// +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if `stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config +// +// - childPolicy: +// - must find a valid child policy with a valid config +// +// - childPolicyConfigTargetFieldName: +// - must be set and non-empty +func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) + cfgJSON := &lbConfigJSON{} + if err := json.Unmarshal(c, cfgJSON); err != nil { + return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) + } + + m := protojson.UnmarshalOptions{DiscardUnknown: true} + rlsProto := &rlspb.RouteLookupConfig{} + if err := m.Unmarshal(cfgJSON.RouteLookupConfig, rlsProto); err != nil { + return nil, fmt.Errorf("rls: bad 
RouteLookupConfig proto %+v: %v", string(cfgJSON.RouteLookupConfig), err) + } + lbCfg, err := parseRLSProto(rlsProto) + if err != nil { + return nil, err + } + + if sc := string(cfgJSON.RouteLookupChannelServiceConfig); sc != "" { + parsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(sc) + if parsed.Err != nil { + return nil, fmt.Errorf("rls: bad control channel service config %q: %v", sc, parsed.Err) + } + lbCfg.controlChannelServiceConfig = sc + } + + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) + } + name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) + if err != nil { + return nil, err + } + lbCfg.childPolicyName = name + lbCfg.childPolicyConfig = config + lbCfg.childPolicyTargetField = cfgJSON.ChildPolicyConfigTargetFieldName + return lbCfg, nil +} + +func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { + // Validations specified on the `grpc_keybuilders` field are performed here. + kbMap, err := keys.MakeBuilderMap(rlsProto) + if err != nil { + return nil, err + } + + // `lookup_service` field must be set and must parse as a target URI. + lookupService := rlsProto.GetLookupService() + if lookupService == "" { + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) + } + parsedTarget, err := url.Parse(lookupService) + if err != nil { + // url.Parse() fails if scheme is missing. Retry with default scheme. + parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) + if err != nil { + return nil, fmt.Errorf("rls: invalid target URI in lookup_service %s", lookupService) + } + } + if parsedTarget.Scheme == "" { + parsedTarget.Scheme = resolver.GetDefaultScheme() + } + if resolver.Get(parsedTarget.Scheme) == nil { + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service %s", lookupService) + } + + lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config %+v: %v", rlsProto, err) + } + if lookupServiceTimeout == 0 { + lookupServiceTimeout = defaultLookupServiceTimeout + } + + // Validations performed here: + // - if `max_age` > 5m, it should be set to 5 minutes + // - if `stale_age` > `max_age`, ignore it + // - if `stale_age` is set, then `max_age` must also be set + maxAge, err := convertDuration(rlsProto.GetMaxAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config %+v: %v", rlsProto, err) + } + staleAge, err := convertDuration(rlsProto.GetStaleAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config %+v: %v", rlsProto, err) + } + if staleAge != 0 && maxAge == 0 { + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config %+v", rlsProto) + } + if staleAge >= maxAge { + logger.Infof("rls: stale_age %v is not less than max_age %v, ignoring it", staleAge, maxAge) + staleAge = 0 + } + if maxAge == 0 || maxAge > maxMaxAge { + logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) + maxAge = maxMaxAge + } + + // `cache_size_bytes` field must have a value greater than 0, and if its + // value is greater than 5M, we cap it at 5M + cacheSizeBytes := rlsProto.GetCacheSizeBytes() + if cacheSizeBytes <= 0 { + return 
nil, fmt.Errorf("rls: cache_size_bytes must be set to a non-zero value: %+v", rlsProto) + } + if cacheSizeBytes > maxCacheSize { + logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + cacheSizeBytes = maxCacheSize + } + return &lbConfig{ + kbMap: kbMap, + lookupService: lookupService, + lookupServiceTimeout: lookupServiceTimeout, + maxAge: maxAge, + staleAge: staleAge, + cacheSizeBytes: cacheSizeBytes, + defaultTarget: rlsProto.GetDefaultTarget(), + }, nil +} + +// parseChildPolicyConfigs iterates through the list of child policies and picks +// the first registered policy and validates its config. +func parseChildPolicyConfigs(childPolicies []map[string]json.RawMessage, targetFieldName string) (string, map[string]json.RawMessage, error) { + for i, config := range childPolicies { + if len(config) != 1 { + return "", nil, fmt.Errorf("rls: invalid childPolicy: entry %v does not contain exactly 1 policy/config pair: %q", i, config) + } + + var name string + var rawCfg json.RawMessage + for name, rawCfg = range config { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + return "", nil, fmt.Errorf("rls: childPolicy %q with config %q does not support config parsing", name, string(rawCfg)) + } + + // To validate child policy configs we do the following: + // - unmarshal the raw JSON bytes of the child policy config into a map + // - add an entry with key set to `target_field_name` and a dummy value + // - marshal the map back to JSON and parse the config using the parser + // retrieved previously + var childConfig map[string]json.RawMessage + if err := json.Unmarshal(rawCfg, &childConfig); err != nil { + return "", nil, fmt.Errorf("rls: json unmarshal failed for child policy config %q: %v", string(rawCfg), err) + } + childConfig[targetFieldName], _ = json.Marshal(dummyChildPolicyTarget) + jsonCfg, err := json.Marshal(childConfig) + if err != nil { + return "", nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) + } + if _, err := parser.ParseConfig(jsonCfg); err != nil { + return "", nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + } + return name, childConfig, nil + } + return "", nil, fmt.Errorf("rls: invalid childPolicy config: no supported policies found in %+v", childPolicies) +} + +func convertDuration(d *durationpb.Duration) (time.Duration, error) { + if d == nil { + return 0, nil + } + return d.AsDuration(), d.CheckValid() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go new file mode 100644 index 000000000..4acc11d90 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go @@ -0,0 +1,220 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
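// Illustrative sketch only, not part of the vendored code: the JSON
// round-trip that parseChildPolicyConfigs above performs to validate a child
// policy config — unmarshal the raw config into a map, inject a placeholder
// value for the target field, and re-marshal. The real code then hands the
// result to the child policy's balancer.ConfigParser; that step is omitted
// here to keep the sketch self-contained. The field name "targetField" and the
// sample config are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

func injectTarget(rawCfg json.RawMessage, targetFieldName, placeholder string) (json.RawMessage, error) {
	var cfg map[string]json.RawMessage
	if err := json.Unmarshal(rawCfg, &cfg); err != nil {
		return nil, fmt.Errorf("child policy config is not a JSON object: %v", err)
	}
	tgt, err := json.Marshal(placeholder)
	if err != nil {
		return nil, err
	}
	cfg[targetFieldName] = tgt
	return json.Marshal(cfg)
}

func main() {
	raw := json.RawMessage(`{"serviceName":"example-service"}`)
	out, err := injectTarget(raw, "targetField", "target_name_to_be_filled_in_later")
	if err != nil {
		panic(err)
	}
	// Prints the config with the dummy target added, ready to be fed to the
	// child policy's config parser for validation.
	fmt.Println(string(out))
}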
+ * + */ + +package rls + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/adaptive" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" +) + +var newAdaptiveThrottler = func() adaptiveThrottler { return adaptive.New() } + +type adaptiveThrottler interface { + ShouldThrottle() bool + RegisterBackendResponse(throttled bool) +} + +// controlChannel is a wrapper around the gRPC channel to the RLS server +// specified in the service config. +type controlChannel struct { + // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB + // policy receives this value in its service config. + rpcTimeout time.Duration + // backToReadyFunc is a callback to be invoked when the connectivity state + // changes from READY --> TRANSIENT_FAILURE --> READY. + backToReadyFunc func() + // throttler in an adaptive throttling implementation used to avoid + // hammering the RLS service while it is overloaded or down. + throttler adaptiveThrottler + + cc *grpc.ClientConn + client rlsgrpc.RouteLookupServiceClient + logger *internalgrpclog.PrefixLogger +} + +// newControlChannel creates a controlChannel to rlsServerName and uses +// serviceConfig, if non-empty, as the default service config for the underlying +// gRPC channel. +func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyFunc func()) (*controlChannel, error) { + ctrlCh := &controlChannel{ + rpcTimeout: rpcTimeout, + backToReadyFunc: backToReadyFunc, + throttler: newAdaptiveThrottler(), + } + ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) + + dopts, err := ctrlCh.dialOpts(bOpts, serviceConfig) + if err != nil { + return nil, err + } + ctrlCh.cc, err = grpc.Dial(rlsServerName, dopts...) + if err != nil { + return nil, err + } + ctrlCh.client = rlsgrpc.NewRouteLookupServiceClient(ctrlCh.cc) + ctrlCh.logger.Infof("Control channel created to RLS server at: %v", rlsServerName) + + go ctrlCh.monitorConnectivityState() + return ctrlCh, nil +} + +// dialOpts constructs the dial options for the control plane channel. +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig string) ([]grpc.DialOption, error) { + // The control plane channel will use the same authority as the parent + // channel for server authorization. This ensures that the identity of the + // RLS server and the identity of the backends is the same, so if the RLS + // config is injected by an attacker, it cannot cause leakage of private + // information contained in headers set by the application. + dopts := []grpc.DialOption{grpc.WithAuthority(bOpts.Authority)} + if bOpts.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(bOpts.Dialer)) + } + + // The control channel will use the channel credentials from the parent + // channel, including any call creds associated with the channel creds. 
+ var credsOpt grpc.DialOption + switch { + case bOpts.DialCreds != nil: + credsOpt = grpc.WithTransportCredentials(bOpts.DialCreds.Clone()) + case bOpts.CredsBundle != nil: + // The "fallback" mode in google default credentials (which is the only + // type of credentials we expect to be used with RLS) uses TLS/ALTS + // creds for transport and uses the same call creds as that on the + // parent bundle. + bundle, err := bOpts.CredsBundle.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + return nil, err + } + credsOpt = grpc.WithCredentialsBundle(bundle) + default: + cc.logger.Warningf("no credentials available, using Insecure") + credsOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) + } + dopts = append(dopts, credsOpt) + + // If the RLS LB policy's configuration specified a service config for the + // control channel, use that and disable service config fetching via the name + // resolver for the control channel. + if serviceConfig != "" { + cc.logger.Infof("Disabling service config from the name resolver and instead using: %s", serviceConfig) + dopts = append(dopts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(serviceConfig)) + } + + return dopts, nil +} + +func (cc *controlChannel) monitorConnectivityState() { + cc.logger.Infof("Starting connectivity state monitoring goroutine") + // Since we use two mechanisms to deal with RLS server being down: + // - adaptive throttling for the channel as a whole + // - exponential backoff on a per-request basis + // we need a way to avoid double-penalizing requests by counting failures + // toward both mechanisms when the RLS server is unreachable. + // + // To accomplish this, we monitor the state of the control plane channel. If + // the state has been TRANSIENT_FAILURE since the last time it was in state + // READY, and it then transitions into state READY, we push on a channel + // which is being read by the LB policy. + // + // The LB the policy will iterate through the cache to reset the backoff + // timeouts in all cache entries. Specifically, this means that it will + // reset the backoff state and cancel the pending backoff timer. Note that + // when cancelling the backoff timer, just like when the backoff timer fires + // normally, a new picker is returned to the channel, to force it to + // re-process any wait-for-ready RPCs that may still be queued if we failed + // them while we were in backoff. However, we should optimize this case by + // returning only one new picker, regardless of how many backoff timers are + // cancelled. + + // Using the background context is fine here since we check for the ClientConn + // entering SHUTDOWN and return early in that case. + ctx := context.Background() + + first := true + for { + // Wait for the control channel to become READY. + for s := cc.cc.GetState(); s != connectivity.Ready; s = cc.cc.GetState() { + if s == connectivity.Shutdown { + return + } + cc.cc.WaitForStateChange(ctx, s) + } + cc.logger.Infof("Connectivity state is READY") + + if !first { + cc.logger.Infof("Control channel back to READY") + cc.backToReadyFunc() + } + first = false + + // Wait for the control channel to move out of READY. 
+ cc.cc.WaitForStateChange(ctx, connectivity.Ready) + if cc.cc.GetState() == connectivity.Shutdown { + return + } + cc.logger.Infof("Connectivity state is %s", cc.cc.GetState()) + } +} + +func (cc *controlChannel) close() { + cc.logger.Infof("Closing control channel") + cc.cc.Close() +} + +type lookupCallback func(targets []string, headerData string, err error) + +// lookup starts a RouteLookup RPC in a separate goroutine and returns the +// results (and error, if any) in the provided callback. +// +// The returned boolean indicates whether the request was throttled by the +// client-side adaptive throttling algorithm in which case the provided callback +// will not be invoked. +func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string, cb lookupCallback) (throttled bool) { + if cc.throttler.ShouldThrottle() { + cc.logger.Infof("RLS request throttled by client-side adaptive throttling") + return true + } + go func() { + req := &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: reqKeys, + Reason: reason, + StaleHeaderData: staleHeaders, + } + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + + ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) + defer cancel() + resp, err := cc.client.RouteLookup(ctx, req) + cb(resp.GetTargets(), resp.GetHeaderData(), err) + }() + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go new file mode 100644 index 000000000..8b1786043 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package adaptive provides functionality for adaptive client-side throttling. +package adaptive + +import ( + "math/rand" + "sync" + "time" +) + +// For overriding in unittests. +var ( + timeNowFunc = func() time.Time { return time.Now() } + randFunc = func() float64 { return rand.Float64() } +) + +const ( + defaultDuration = 30 * time.Second + defaultBins = 100 + defaultRatioForAccepts = 2.0 + defaultRequestsPadding = 8.0 +) + +// Throttler implements a client-side throttling recommendation system. All +// methods are safe for concurrent use by multiple goroutines. +// +// The throttler has the following knobs for which we will use defaults for +// now. If there is a need to make them configurable at a later point in time, +// support for the same will be added. +// - Duration: amount of recent history that will be taken into account for +// making client-side throttling decisions. A default of 30 seconds is used. +// - Bins: number of bins to be used for bucketing historical data. A default +// of 100 is used. +// - RatioForAccepts: ratio by which accepts are multiplied, typically a value +// slightly larger than 1.0. 
This is used to make the throttler behave as if +// the backend had accepted more requests than it actually has, which lets us +// err on the side of sending to the backend more requests than we think it +// will accept for the sake of speeding up the propagation of state. A +// default of 2.0 is used. +// - RequestsPadding: is used to decrease the (client-side) throttling +// probability in the low QPS regime (to speed up propagation of state), as +// well as to safeguard against hitting a client-side throttling probability +// of 100%. The weight of this value decreases as the number of requests in +// recent history grows. A default of 8 is used. +// +// The adaptive throttler attempts to estimate the probability that a request +// will be throttled using recent history. Server requests (both throttled and +// accepted) are registered with the throttler (via the RegisterBackendResponse +// method), which then recommends client-side throttling (via the +// ShouldThrottle method) with probability given by: +// (requests - RatioForAccepts * accepts) / (requests + RequestsPadding) +type Throttler struct { + ratioForAccepts float64 + requestsPadding float64 + + // Number of total accepts and throttles in the lookback period. + mu sync.Mutex + accepts *lookback + throttles *lookback +} + +// New initializes a new adaptive throttler with the default values. +func New() *Throttler { + return newWithArgs(defaultDuration, defaultBins, defaultRatioForAccepts, defaultRequestsPadding) +} + +// newWithArgs initializes a new adaptive throttler with the provided values. +// Used only in unittests. +func newWithArgs(duration time.Duration, bins int64, ratioForAccepts, requestsPadding float64) *Throttler { + return &Throttler{ + ratioForAccepts: ratioForAccepts, + requestsPadding: requestsPadding, + accepts: newLookback(bins, duration), + throttles: newLookback(bins, duration), + } +} + +// ShouldThrottle returns a probabilistic estimate of whether the server would +// throttle the next request. This should be called for every request before +// allowing it to hit the network. If the returned value is true, the request +// should be aborted immediately (as if it had been throttled by the server). +func (t *Throttler) ShouldThrottle() bool { + randomProbability := randFunc() + now := timeNowFunc() + + t.mu.Lock() + defer t.mu.Unlock() + + accepts, throttles := float64(t.accepts.sum(now)), float64(t.throttles.sum(now)) + requests := accepts + throttles + throttleProbability := (requests - t.ratioForAccepts*accepts) / (requests + t.requestsPadding) + if throttleProbability <= randomProbability { + return false + } + + t.throttles.add(now, 1) + return true +} + +// RegisterBackendResponse registers a response received from the backend for a +// request allowed by ShouldThrottle. This should be called for every response +// received from the backend (i.e., once for each request for which +// ShouldThrottle returned false). +func (t *Throttler) RegisterBackendResponse(throttled bool) { + now := timeNowFunc() + + t.mu.Lock() + if throttled { + t.throttles.add(now, 1) + } else { + t.accepts.add(now, 1) + } + t.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go new file mode 100644 index 000000000..1ab874c35 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package adaptive + +import "time" + +// lookback implements a moving sum over an int64 timeline. +type lookback struct { + bins int64 // Number of bins to use for lookback. + width time.Duration // Width of each bin. + + head int64 // Absolute bin index (time * bins / duration) of the current head bin. + total int64 // Sum over all the values in buf, within the lookback window behind head. + buf []int64 // Ring buffer for keeping track of the sum elements. +} + +// newLookback creates a new lookback for the given duration with a set number +// of bins. +func newLookback(bins int64, duration time.Duration) *lookback { + return &lookback{ + bins: bins, + width: duration / time.Duration(bins), + buf: make([]int64, bins), + } +} + +// add is used to increment the lookback sum. +func (l *lookback) add(t time.Time, v int64) { + pos := l.advance(t) + + if (l.head - pos) >= l.bins { + // Do not increment counters if pos is more than bins behind head. + return + } + l.buf[pos%l.bins] += v + l.total += v +} + +// sum returns the sum of the lookback buffer at the given time or head, +// whichever is greater. +func (l *lookback) sum(t time.Time) int64 { + l.advance(t) + return l.total +} + +// advance prepares the lookback buffer for calls to add() or sum() at time t. +// If head is greater than t then the lookback buffer will be untouched. The +// absolute bin index corresponding to t is returned. It will always be less +// than or equal to head. +func (l *lookback) advance(t time.Time) int64 { + ch := l.head // Current head bin index. + nh := t.UnixNano() / l.width.Nanoseconds() // New head bin index. + + if nh <= ch { + // Either head unchanged or clock jitter (time has moved backwards). Do + // not advance. + return nh + } + + jmax := min(l.bins, nh-ch) + for j := int64(0); j < jmax; j++ { + i := (ch + j + 1) % l.bins + l.total -= l.buf[i] + l.buf[i] = 0 + } + l.head = nh + return nh +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go new file mode 100644 index 000000000..cc5ce510a --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -0,0 +1,267 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keys provides functionality required to build RLS request keys. 
+package keys + +import ( + "errors" + "fmt" + "sort" + "strings" + + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" +) + +// BuilderMap maps from request path to the key builder for that path. +type BuilderMap map[string]builder + +// MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map +// from paths to key builders. +func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { + kbs := cfg.GetGrpcKeybuilders() + if len(kbs) == 0 { + return nil, errors.New("rls: RouteLookupConfig does not contain any GrpcKeyBuilder") + } + + bm := make(map[string]builder) + for _, kb := range kbs { + // Extract keys from `headers`, `constant_keys` and `extra_keys` fields + // and populate appropriate values in the builder struct. Also ensure + // that keys are not repeated. + var matchers []matcher + seenKeys := make(map[string]bool) + constantKeys := kb.GetConstantKeys() + for k := range kb.GetConstantKeys() { + seenKeys[k] = true + } + for _, h := range kb.GetHeaders() { + if h.GetRequiredMatch() { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) + } + key := h.GetKey() + if seenKeys[key] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q across headers, constant_keys and extra_keys {%+v}", key, kbs) + } + seenKeys[key] = true + matchers = append(matchers, matcher{key: h.GetKey(), names: h.GetNames()}) + } + if seenKeys[kb.GetExtraKeys().GetHost()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetHost(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetService()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetService(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetMethod()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetMethod(), kbs) + } + b := builder{ + headerKeys: matchers, + constantKeys: constantKeys, + hostKey: kb.GetExtraKeys().GetHost(), + serviceKey: kb.GetExtraKeys().GetService(), + methodKey: kb.GetExtraKeys().GetMethod(), + } + + // Store the builder created above in the BuilderMap based on the value + // of the `Names` field, which wraps incoming request's service and + // method. Also, ensure that there are no repeated `Names` field. + names := kb.GetNames() + if len(names) == 0 { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) + } + for _, name := range names { + if name.GetService() == "" { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service {%+v}", kbs) + } + if strings.Contains(name.GetMethod(), `/`) { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash {%+v}", kbs) + } + path := "/" + name.GetService() + "/" + name.GetMethod() + if _, ok := bm[path]; ok { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Name field {%+v}", kbs) + } + bm[path] = b + } + } + return bm, nil +} + +// KeyMap represents the RLS keys to be used for a request. +type KeyMap struct { + // Map is the representation of an RLS key as a Go map. 
This is used when + // an actual RLS request is to be sent out on the wire, since the + // RouteLookupRequest proto expects a Go map. + Map map[string]string + // Str is the representation of an RLS key as a string, sorted by keys. + // Since the RLS keys are part of the cache key in the request cache + // maintained by the RLS balancer, and Go maps cannot be used as keys for + // Go maps (the cache is implemented as a map), we need a stringified + // version of it. + Str string +} + +// RLSKey builds the RLS keys to be used for the given request, identified by +// the request path and the request headers stored in metadata. +func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + // The path passed in is of the form "/service/method". The keyBuilderMap is + // indexed with keys of the form "/service/" or "/service/method". The service + // that we set in the keyMap (to be sent out in the RLS request) should not + // include any slashes though. + i := strings.LastIndex(path, "/") + service, method := path[:i+1], path[i+1:] + b, ok := bm[path] + if !ok { + b, ok = bm[service] + if !ok { + return KeyMap{} + } + } + + kvMap := b.buildHeaderKeys(md) + if b.hostKey != "" { + kvMap[b.hostKey] = host + } + if b.serviceKey != "" { + kvMap[b.serviceKey] = strings.Trim(service, "/") + } + if b.methodKey != "" { + kvMap[b.methodKey] = method + } + for k, v := range b.constantKeys { + kvMap[k] = v + } + return KeyMap{Map: kvMap, Str: mapToString(kvMap)} +} + +// Equal reports whether bm and am represent equivalent BuilderMaps. +func (bm BuilderMap) Equal(am BuilderMap) bool { + if (bm == nil) != (am == nil) { + return false + } + if len(bm) != len(am) { + return false + } + + for key, bBuilder := range bm { + aBuilder, ok := am[key] + if !ok { + return false + } + if !bBuilder.Equal(aBuilder) { + return false + } + } + return true +} + +// builder provides the actual functionality of building RLS keys. +type builder struct { + headerKeys []matcher + constantKeys map[string]string + // The following keys mirror corresponding fields in `extra_keys`. + hostKey string + serviceKey string + methodKey string +} + +// Equal reports whether b and a represent equivalent key builders. +func (b builder) Equal(a builder) bool { + if len(b.headerKeys) != len(a.headerKeys) { + return false + } + // Protobuf serialization maintains the order of repeated fields. Matchers + // are specified as a repeated field inside the KeyBuilder proto. If the + // order changes, it means that the order in the protobuf changed. We report + // this case as not being equal even though the builders could possible be + // functionally equal. + for i, bMatcher := range b.headerKeys { + aMatcher := a.headerKeys[i] + if !bMatcher.Equal(aMatcher) { + return false + } + } + + if len(b.constantKeys) != len(a.constantKeys) { + return false + } + for k, v := range b.constantKeys { + if a.constantKeys[k] != v { + return false + } + } + + return b.hostKey == a.hostKey && b.serviceKey == a.serviceKey && b.methodKey == a.methodKey +} + +// matcher helps extract a key from request headers based on a given name. +type matcher struct { + // The key used in the keyMap sent as part of the RLS request. + key string + // List of header names which can supply the value for this key. + names []string +} + +// Equal reports if m and a are equivalent headerKeys. 
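// Illustrative sketch only, not part of the vendored code: why KeyMap above
// carries both a Go map and a sorted string form. The map feeds the
// RouteLookupRequest proto, while the string — stable regardless of map
// iteration order — is what can participate in the balancer's cache key, since
// Go maps are not comparable and cannot themselves be map keys. stringifyKeys
// is a hypothetical stand-in for the unexported mapToString in this file.
package main

import (
	"fmt"
	"sort"
	"strings"
)

func stringifyKeys(kv map[string]string) string {
	keys := make([]string, 0, len(kv))
	for k := range kv {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+kv[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	keys := map[string]string{"service": "echo.Echo", "user": "alice"}
	// Prints "service=echo.Echo,user=alice"; this string can index a map such
	// as map[string]*cacheEntry, whereas map[string]string itself could not.
	fmt.Println(stringifyKeys(keys))
}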
+func (m matcher) Equal(a matcher) bool { + if m.key != a.key { + return false + } + if len(m.names) != len(a.names) { + return false + } + for i := 0; i < len(m.names); i++ { + if m.names[i] != a.names[i] { + return false + } + } + return true +} + +func (b builder) buildHeaderKeys(md metadata.MD) map[string]string { + kvMap := make(map[string]string) + if len(md) == 0 { + return kvMap + } + for _, m := range b.headerKeys { + for _, name := range m.names { + if vals := md.Get(name); vals != nil { + kvMap[m.key] = strings.Join(vals, ",") + break + } + } + } + return kvMap +} + +func mapToString(kv map[string]string) string { + keys := make([]string, 0, len(kv)) + for k := range kv { + keys = append(keys, k) + } + sort.Strings(keys) + var sb strings.Builder + for i, k := range keys { + if i != 0 { + fmt.Fprint(&sb, ",") + } + fmt.Fprintf(&sb, "%s=%s", k, kv[k]) + } + return sb.String() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/picker.go b/vendor/google.golang.org/grpc/balancer/rls/picker.go new file mode 100644 index 000000000..e5c86f290 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -0,0 +1,397 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + errRLSThrottled = errors.New("RLS call throttled at client side") + + // Function to compute data cache entry size. + computeDataCacheEntrySize = dcEntrySize +) + +// exitIdler wraps the only method on the BalancerGroup that the picker calls. +type exitIdler interface { + ExitIdleOne(id string) +} + +// rlsPicker selects the subConn to be used for a particular RPC. It does not +// manage subConns directly and delegates to pickers provided by child policies. +type rlsPicker struct { + // The keyBuilder map used to generate RLS keys for the RPC. This is built + // by the LB policy based on the received ServiceConfig. + kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string + + lb *rlsBalancer + + // The picker is given its own copy of the below fields from the RLS LB policy + // to avoid having to grab the mutex on the latter. + rlsServerTarget string + grpcTarget string + metricsRecorder estats.MetricsRecorder + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. 
+ staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger +} + +// isFullMethodNameValid returns true if name is of the form `/service/method`. +func isFullMethodNameValid(name string) bool { + return strings.HasPrefix(name, "/") && strings.Count(name, "/") == 2 +} + +// Pick makes the routing decision for every outbound RPC. +func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + if name := info.FullMethodName; !isFullMethodNameValid(name) { + return balancer.PickResult{}, fmt.Errorf("rls: method name %q is not of the form '/service/method'", name) + } + + // Build the request's keys using the key builders from LB config. + md, _ := metadata.FromOutgoingContext(info.Ctx) + reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) + + p.lb.cacheMu.Lock() + var pr balancer.PickResult + var err error + + // Record metrics without the cache mutex held, to prevent lock contention + // between concurrent RPCs and their Pick calls. Metrics recording can + // potentially be expensive. + metricsCallback := func() {} + defer func() { + p.lb.cacheMu.Unlock() + metricsCallback() + }() + + // Lookup data cache and pending request map using request path and keys. + cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} + dcEntry := p.lb.dataCache.getEntry(cacheKey) + pendingEntry := p.lb.pendingMap[cacheKey] + now := time.Now() + + switch { + // No data cache entry. No pending request. + case dcEntry == nil && pendingEntry == nil: + throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // No data cache entry. Pending request exists. + case dcEntry == nil && pendingEntry != nil: + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. No pending request. + case dcEntry != nil && pendingEntry == nil: + if dcEntry.expiryTime.After(now) { + if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) { + p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) + } + // Delegate to child policies. + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err + } + + // We get here only if the data cache entry has expired. If entry is in + // backoff, delegate to default target or fail the pick. + if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { + // Avoid propagating the status code received on control plane RPCs to the + // data plane which can lead to unexpected outcomes as we do not control + // the status code sent by the control plane. Propagating the status + // message received from the control plane is still fine, as it could be + // useful for debugging purposes. + st := dcEntry.status + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + return pr, err + } + + // We get here only if the entry has expired and is not in backoff.
+ throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. Pending request exists. + default: + if dcEntry.expiryTime.After(now) { + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err + } + // Data cache entry has expired and pending request exists. Queue pick. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// errToPickResult is a helper function which converts the error value returned +// by Pick() to a string that represents the pick result. +func errToPickResult(err error) string { + if err == nil { + return "complete" + } + if errors.Is(err, balancer.ErrNoSubConnAvailable) { + return "queue" + } + if _, ok := status.FromError(err); ok { + return "drop" + } + return "fail" +} + +// delegateToChildPoliciesLocked is a helper function which iterates through the +// list of child policy wrappers in a cache entry and attempts to find a child +// policy to which this RPC can be routed to. If all child policies are in +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) { + const rlsDataHeaderName = "x-google-rls-data" + for i, cpw := range dcEntry.childPolicyWrappers { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if + // it is the last one (which handles the case of delegating to the last + // child picker if all child policies are in TRANSIENT_FAILURE). + if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { + // Any header data received from the RLS server is stored in the + // cache entry and needs to be sent to the actual backend in the + // X-Google-RLS-Data header. + res, err := state.Picker.Pick(info) + if err != nil { + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. + return + } + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr) + }, err + } + + if res.Metadata == nil { + res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) + } else { + res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) + } + return res, func() { + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete") + }, nil + } + } + + // In the unlikely event that we have a cache entry with no targets, we end up + // queueing the RPC. + return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable +} + +// useDefaultPickIfPossible is a helper method which delegates to the default +// target if one is configured, or fails the pick with the given error. Returns +// a function to be invoked to record metrics. 
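// Illustrative sketch only, not part of the vendored code: how the pick-result
// label recorded by the metrics above is derived from the error returned by a
// child picker. classify is a hypothetical stand-in for errToPickResult
// defined earlier in this file.
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func classify(err error) string {
	switch {
	case err == nil:
		return "complete"
	case errors.Is(err, balancer.ErrNoSubConnAvailable):
		return "queue" // The RPC stays queued until a new picker is pushed.
	default:
		if _, ok := status.FromError(err); ok {
			return "drop" // Status errors terminate the RPC with that status.
		}
		return "fail" // Any other error fails this pick attempt.
	}
}

func main() {
	fmt.Println(classify(nil))                                         // complete
	fmt.Println(classify(balancer.ErrNoSubConnAvailable))              // queue
	fmt.Println(classify(status.Error(codes.Unavailable, "no hosts"))) // drop
	fmt.Println(classify(errors.New("transient failure")))             // fail
}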
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) {
+ if p.defaultPolicy != nil {
+ state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state))
+ res, err := state.Picker.Pick(info)
+ pr := errToPickResult(err)
+ return res, func() {
+ if pr == "queue" {
+ // Don't record metrics for queued Picks.
+ return
+ }
+ defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr)
+ }, err
+ }
+
+ return balancer.PickResult{}, func() {
+ failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget)
+ }, errOnNoDefault
+}
+
+// sendRouteLookupRequestLocked adds an entry to the pending request map and
+// sends out an RLS request using the passed in arguments. Returns a value
+// indicating if the request was throttled by the client-side adaptive
+// throttler.
+func (p *rlsPicker) sendRouteLookupRequestLocked(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool {
+ if p.lb.pendingMap[cacheKey] != nil {
+ return false
+ }
+
+ p.lb.pendingMap[cacheKey] = bs
+ throttled := p.ctrlCh.lookup(reqKeys, reason, staleHeaders, func(targets []string, headerData string, err error) {
+ p.handleRouteLookupResponse(cacheKey, targets, headerData, err)
+ })
+ if throttled {
+ delete(p.lb.pendingMap, cacheKey)
+ }
+ return throttled
+}
+
+// handleRouteLookupResponse is the callback invoked by the control channel upon
+// receipt of an RLS response. Modifies the data cache and pending requests map
+// and sends a new picker.
+//
+// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu.
+func (p *rlsPicker) handleRouteLookupResponse(cacheKey cacheKey, targets []string, headerData string, err error) {
+ p.logger.Infof("Received RLS response for key %+v with targets %+v, headerData %q, err: %v", cacheKey, targets, headerData, err)
+
+ p.lb.cacheMu.Lock()
+ defer func() {
+ // Pending request map entry is unconditionally deleted since the request is
+ // no longer pending.
+ p.logger.Infof("Removing pending request entry for key %+v", cacheKey)
+ delete(p.lb.pendingMap, cacheKey)
+ p.lb.sendNewPicker()
+ p.lb.cacheMu.Unlock()
+ }()
+
+ // Lookup the data cache entry or create a new one.
+ dcEntry := p.lb.dataCache.getEntry(cacheKey)
+ if dcEntry == nil {
+ dcEntry = &cacheEntry{}
+ if _, ok := p.lb.dataCache.addEntry(cacheKey, dcEntry); !ok {
+ // This is a very unlikely case where we are unable to add a
+ // data cache entry. Log and leave.
+ p.logger.Warningf("Failed to add data cache entry for %+v", cacheKey)
+ return
+ }
+ }
+
+ // For failed requests, the data cache entry is modified as follows:
+ // - status is set to error returned from the control channel
+ // - current backoff state is available in the pending entry
+ // - `retries` field is incremented and
+ // - backoff state is moved to the data cache
+ // - backoffTime is set to the time indicated by the backoff state
+ // - backoffExpiryTime is set to twice the backoff time
+ // - backoffTimer is set to fire after backoffTime
+ //
+ // When a proactive cache refresh fails, this would leave the targets and the
+ // expiry time from the old entry unchanged. And this means that the old valid
+ // entry would be used until expiration, and a new picker would be sent upon
+ // backoff expiry.
+ now := time.Now()
+
+ // "An RLS request is considered to have failed if it returns a non-OK
+ // status or the RLS response's targets list is empty." - RLS LB Policy
+ // design.
+ if len(targets) == 0 && err == nil {
+ err = fmt.Errorf("RLS response's target list does not contain any entries for key %+v", cacheKey)
+ // If err is already set, the control plane RPC failed, and that error
+ // already explains why no targets were passed to this helper; there is
+ // no need to overwrite it with this message.
+ }
+ if err != nil {
+ dcEntry.status = err
+ pendingEntry := p.lb.pendingMap[cacheKey]
+ pendingEntry.retries++
+ backoffTime := pendingEntry.bs.Backoff(pendingEntry.retries)
+ dcEntry.backoffState = pendingEntry
+ dcEntry.backoffTime = now.Add(backoffTime)
+ dcEntry.backoffExpiryTime = now.Add(2 * backoffTime)
+ if dcEntry.backoffState.timer != nil {
+ dcEntry.backoffState.timer.Stop()
+ }
+ dcEntry.backoffState.timer = time.AfterFunc(backoffTime, p.lb.sendNewPicker)
+ return
+ }
+
+ // For successful requests, the cache entry is modified as follows:
+ // - childPolicyWrappers is set to point to the child policy wrappers
+ // associated with the targets specified in the received response
+ // - headerData is set to the value received in the response
+ // - expiryTime, staleTime and earliestEvictTime are set
+ // - status is set to nil (OK status)
+ // - backoff state is cleared
+ p.setChildPolicyWrappersInCacheEntry(dcEntry, targets)
+ dcEntry.headerData = headerData
+ dcEntry.expiryTime = now.Add(p.maxAge)
+ if p.staleAge != 0 {
+ dcEntry.staleTime = now.Add(p.staleAge)
+ }
+ dcEntry.earliestEvictTime = now.Add(minEvictDuration)
+ dcEntry.status = nil
+ dcEntry.backoffState = &backoffState{bs: defaultBackoffStrategy}
+ dcEntry.backoffTime = time.Time{}
+ dcEntry.backoffExpiryTime = time.Time{}
+ p.lb.dataCache.updateEntrySize(dcEntry, computeDataCacheEntrySize(cacheKey, dcEntry))
+}
+
+// setChildPolicyWrappersInCacheEntry sets up the childPolicyWrappers field in
+// the cache entry to point to the child policy wrappers for the targets
+// specified in the RLS response.
+//
+// Caller must hold a write-lock on p.lb.cacheMu.
+func (p *rlsPicker) setChildPolicyWrappersInCacheEntry(dcEntry *cacheEntry, newTargets []string) {
+ // If the childPolicyWrappers field is already pointing to the right targets,
+ // then the field's value does not need to change.
+ targetsChanged := true
+ func() {
+ if cpws := dcEntry.childPolicyWrappers; cpws != nil {
+ if len(newTargets) != len(cpws) {
+ return
+ }
+ for i, target := range newTargets {
+ if cpws[i].target != target {
+ return
+ }
+ }
+ targetsChanged = false
+ }
+ }()
+ if !targetsChanged {
+ return
+ }
+
+ // If the childPolicyWrappers field is not already set to the right targets,
+ // then it must be reset. We construct a new list of child policies and
+ // then swap out the old list for the new one.
+ newChildPolicies := p.lb.acquireChildPolicyReferences(newTargets)
+ oldChildPolicyTargets := make([]string, len(dcEntry.childPolicyWrappers))
+ for i, cpw := range dcEntry.childPolicyWrappers {
+ oldChildPolicyTargets[i] = cpw.target
+ }
+ p.lb.releaseChildPolicyReferences(oldChildPolicyTargets)
+ dcEntry.childPolicyWrappers = newChildPolicies
+}
+
+// computeDataCacheEntrySize computes the size of a data cache entry, given a
+// cache key and a cache entry.
+func computeDataCacheEntrySize(key cacheKey, entry *cacheEntry) int64 {
+ return int64(len(key.path) + len(key.keys) + len(entry.headerData))
+}
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
new file mode 100644
index 000000000..88bf64ec4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go
@@ -0,0 +1,636 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package weightedroundrobin
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/weightedroundrobin/internal"
+ "google.golang.org/grpc/balancer/weightedtarget"
+ "google.golang.org/grpc/connectivity"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal/grpclog"
+ iserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+ "google.golang.org/grpc/orca"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+
+ v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3"
+)
+
+// Name is the name of the weighted round robin balancer.
+const Name = "weighted_round_robin"
+
+var (
+ rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.rr_fallback",
+ Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.",
+ Unit: "update",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+
+ endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ Description: "EXPERIMENTAL. Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).",
+ Unit: "endpoint",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.locality"},
+ Default: false,
+ })
+
+ endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
+ Name: "grpc.lb.wrr.endpoint_weight_stale",
+ Description: "EXPERIMENTAL. 
Number of endpoints from each scheduler update whose latest weight is older than the expiration period.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weights", + Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &wrrBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + target: bOpts.Target.String(), + metricsRecorder: bOpts.MetricsRecorder, + } + + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{ + // Default values as documented in A58. + OOBReportingPeriod: iserviceconfig.Duration(10 * time.Second), + BlackoutPeriod: iserviceconfig.Duration(10 * time.Second), + WeightExpirationPeriod: iserviceconfig.Duration(3 * time.Minute), + WeightUpdatePeriod: iserviceconfig.Duration(time.Second), + ErrorUtilizationPenalty: 1, + } + if err := json.Unmarshal(js, lbCfg); err != nil { + return nil, fmt.Errorf("wrr: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + + if lbCfg.ErrorUtilizationPenalty < 0 { + return nil, fmt.Errorf("wrr: errorUtilizationPenalty must be non-negative") + } + + // For easier comparisons later, ensure the OOB reporting period is unset + // (0s) when OOB reports are disabled. + if !lbCfg.EnableOOBLoadReport { + lbCfg.OOBReportingPeriod = 0 + } + + // Impose lower bound of 100ms on weightUpdatePeriod. + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < iserviceconfig.Duration(100*time.Millisecond) { + lbCfg.WeightUpdatePeriod = iserviceconfig.Duration(100 * time.Millisecond) + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + // The following fields are immutable. + cc balancer.ClientConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() + locality string +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil + cfg, ok := ccs.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) + } + + b.cfg = cfg + b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). + var sc balancer.SubConn + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sc, state) + }, + }) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + + metricsRecorder: b.metricsRecorder, + target: b.target, + locality: b.locality, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. + wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + wsc.SubConn.Shutdown() + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. 
+ return + } + b.regeneratePicker() +} + +func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() + } +} + +// Close stops the balancer. It cancels any ongoing scheduler updates and +// stops any ORCA listeners. +func (b *wrrBalancer) Close() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. 
+ b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + metricsRecorder: b.metricsRecorder, + locality: b.locality, + target: b.target, + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +} + +// picker is the WRR policy's picker. It uses live-updating backend weights to +// update the scheduler periodically and ensure picks are routed proportional +// to those weights. +type picker struct { + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns + + // The following fields are immutable. + target string + locality string + metricsRecorder estats.MetricsRecorder +} + +func (p *picker) scWeights(recordMetrics bool) []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) + } + + return ws +} + +func (p *picker) inc() uint32 { + return atomic.AddUint32(&p.v, 1) +} + +func (p *picker) regenerateScheduler() { + s := p.newScheduler(true) + atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) +} + +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { + // No need to regenerate weights with only one backend. + return + } + + go func() { + ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.regenerateScheduler() + } + } + }() +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. + sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { + // The following fields are immutable. + balancer.SubConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + locality string + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. + connectivityState connectivity.State + stopORCAListener func() + + // The following fields are accessed asynchronously and are protected by + // mu. 
Note that mu may not be held when calling into the stopORCAListener + // or when registering a new listener, as those calls require the ORCA + // producer mu which is held when calling the listener, and the listener + // holds mu. + mu sync.Mutex + weightVal float64 + nonEmptySince time.Time + lastUpdated time.Time + cfg *lbConfig +} + +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load + utilization := load.ApplicationUtilization + if utilization == 0 { + utilization = load.CpuUtilization + } + if utilization == 0 || load.RpsFractional == 0 { + if w.logger.V(2) { + w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) + } + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + errorRate := load.Eps / load.RpsFractional + w.weightVal = load.RpsFractional / (utilization + errorRate*w.cfg.ErrorUtilizationPenalty) + if w.logger.V(2) { + w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) + } + + w.lastUpdated = internal.TimeNow() + if w.nonEmptySince.Equal(time.Time{}) { + w.nonEmptySince = w.lastUpdated + } +} + +// updateConfig updates the parameters of the WRR policy and +// stops/starts/restarts the ORCA OOB listener. +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { + w.mu.Lock() + oldCfg := w.cfg + w.cfg = cfg + w.mu.Unlock() + + newPeriod := cfg.OOBReportingPeriod + if cfg.EnableOOBLoadReport == oldCfg.EnableOOBLoadReport && + newPeriod == oldCfg.OOBReportingPeriod { + // Load reporting wasn't enabled before or after, or load reporting was + // enabled before and after, and had the same period. (Note that with + // load reporting disabled, OOBReportingPeriod is always 0.) + return + } + // (Optionally stop and) start the listener to use the new config's + // settings for OOB reporting. + + if w.stopORCAListener != nil { + w.stopORCAListener() + } + if !cfg.EnableOOBLoadReport { + w.stopORCAListener = nil + return + } + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(newPeriod)} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Also + // reset lastUpdated to trigger endpoint weight not yet usable in the + // case endpoint gets asked what weight it is before receiving a new + // load report. Note that we cannot guarantee that we will never receive + // lingering callbacks for backend metric reports from the previous + // connection after the new connection has been established, but they + // should be masked by new backend metric reports from the new + // connection by the time the blackout period ends. 
+ w.mu.Lock() + w.nonEmptySince = time.Time{} + w.lastUpdated = time.Time{} + w.mu.Unlock() + case connectivity.Shutdown: + if w.stopORCAListener != nil { + w.stopORCAListener() + } + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +// account the parameters. Returns 0 for blacked out or expired data, which +// will cause the backend weight to be treated as the mean of the weights of the +// other backends. If forScheduler is set to true, this function will emit +// metrics through the metrics registry. +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { + w.mu.Lock() + defer w.mu.Unlock() + + if recordMetrics { + defer func() { + endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality) + }() + } + + // The SubConn has not received a load report (i.e. just turned READY with + // no load report). + if w.lastUpdated.Equal(time.Time{}) { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + return 0 + } + + // If the most recent update was longer ago than the expiration period, + // reset nonEmptySince so that we apply the blackout period again if we + // start getting data again in the future, and return 0. + if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + if recordMetrics { + endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } + w.nonEmptySince = time.Time{} + return 0 + } + + // If we don't have at least blackoutPeriod worth of data, return 0. + if blackoutPeriod != 0 && (w.nonEmptySince.Equal(time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if recordMetrics { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } + return 0 + } + + return w.weightVal +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go new file mode 100644 index 000000000..38f89d32f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Whether to enable out-of-band utilization reporting collection from the + // endpoints. 
By default, per-request utilization reporting is used. + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OOBReportingPeriod iserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + + // A given endpoint must report load metrics continuously for at least this + // long before the endpoint weight will be used. This avoids churn when + // the set of endpoint addresses changes. Takes effect both immediately + // after we establish a connection to an endpoint and after + // weight_expiration_period has caused us to stop using the most recent + // load metrics. Default is 10 seconds. + BlackoutPeriod iserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod iserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + + // How often endpoint weights are recalculated. Default is 1 second. + WeightUpdatePeriod iserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Default is 1.0. + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go new file mode 100644 index 000000000..7b64fbf4e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal allows for easier testing of the weightedroundrobin +// package. +package internal + +import ( + "time" +) + +// AllowAnyWeightUpdatePeriod permits any setting of WeightUpdatePeriod for +// testing. Normally a minimum of 100ms is applied. +var AllowAnyWeightUpdatePeriod bool + +// LBConfig allows tests to produce a JSON form of the config from the struct +// instead of using a string. 
+type LBConfig struct {
+ EnableOOBLoadReport *bool `json:"enableOobLoadReport,omitempty"`
+ OOBReportingPeriod *string `json:"oobReportingPeriod,omitempty"`
+ BlackoutPeriod *string `json:"blackoutPeriod,omitempty"`
+ WeightExpirationPeriod *string `json:"weightExpirationPeriod,omitempty"`
+ WeightUpdatePeriod *string `json:"weightUpdatePeriod,omitempty"`
+ ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"`
+}
+
+// TimeNow can be overridden by tests to return a different value for the
+// current time.
+var TimeNow = time.Now
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go
new file mode 100644
index 000000000..43184ca9a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package weightedroundrobin
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+const prefix = "[%p] "
+
+var logger = grpclog.Component("weighted-round-robin")
+
+func prefixLogger(p *wrrBalancer) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
+}
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
new file mode 100644
index 000000000..56aa15da1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package weightedroundrobin
+
+import (
+ "math"
+)
+
+type scheduler interface {
+ nextIndex() int
+}
+
+// newScheduler uses scWeights to create a new scheduler for selecting subconns
+// in a picker. It will return a round robin implementation if at least
+// len(scWeights)-1 are zero or there is only a single subconn, otherwise it
+// will return an Earliest Deadline First (EDF) scheduler implementation that
+// selects the subchannels according to their weights.
+func (p *picker) newScheduler(recordMetrics bool) scheduler { + scWeights := p.scWeights(recordMetrics) + n := len(scWeights) + if n == 0 { + return nil + } + if n == 1 { + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: 1, inc: p.inc} + } + sum := float64(0) + numZero := 0 + max := float64(0) + for _, w := range scWeights { + sum += w + if w > max { + max = w + } + if w == 0 { + numZero++ + } + } + + if numZero >= n-1 { + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: uint32(n), inc: p.inc} + } + unscaledMean := sum / float64(n-numZero) + scalingFactor := maxWeight / max + mean := uint16(math.Round(scalingFactor * unscaledMean)) + + weights := make([]uint16, n) + allEqual := true + for i, w := range scWeights { + if w == 0 { + // Backends with weight = 0 use the mean. + weights[i] = mean + } else { + scaledWeight := uint16(math.Round(scalingFactor * w)) + weights[i] = scaledWeight + if scaledWeight != mean { + allEqual = false + } + } + } + + if allEqual { + return &rrScheduler{numSCs: uint32(n), inc: p.inc} + } + + logger.Infof("using edf scheduler with weights: %v", weights) + return &edfScheduler{weights: weights, inc: p.inc} +} + +const maxWeight = math.MaxUint16 + +// edfScheduler implements EDF using the same algorithm as grpc-c++ here: +// +// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc +type edfScheduler struct { + inc func() uint32 + weights []uint16 +} + +// Returns the index in s.weights for the picker to choose. +func (s *edfScheduler) nextIndex() int { + const offset = maxWeight / 2 + + for { + idx := uint64(s.inc()) + + // The sequence number (idx) is split in two: the lower %n gives the + // index of the backend, and the rest gives the number of times we've + // iterated through all backends. `generation` is used to + // deterministically decide whether we pick or skip the backend on this + // iteration, in proportion to the backend's weight. + + backendIndex := idx % uint64(len(s.weights)) + generation := idx / uint64(len(s.weights)) + weight := uint64(s.weights[backendIndex]) + + // We pick a backend `weight` times per `maxWeight` generations. The + // multiply and modulus ~evenly spread out the picks for a given + // backend between different generations. The offset by `backendIndex` + // helps to reduce the chance of multiple consecutive non-picks: if we + // have two consecutive backends with an equal, say, 80% weight of the + // max, with no offset we would see 1/5 generations that skipped both. + // TODO(b/190488683): add test for offset efficacy. + mod := uint64(weight*generation+backendIndex*offset) % maxWeight + + if mod < maxWeight-weight { + continue + } + return int(backendIndex) + } +} + +// A simple RR scheduler to use for fallback when fewer than two backends have +// non-zero weights, or all backends have the same weight, or when only one +// subconn exists. 
+type rrScheduler struct { + inc func() uint32 + numSCs uint32 +} + +func (s *rrScheduler) nextIndex() int { + idx := s.inc() + return int(idx % s.numSCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go new file mode 100644 index 000000000..8741fdad1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedroundrobin provides an implementation of the weighted round +// robin LB policy, as defined in [gRFC A58]. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +// +// [gRFC A58]: https://github.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md +package weightedroundrobin + +import ( + "fmt" + + "google.golang.org/grpc/resolver" +) + +// attributeKey is the type used as the key to store AddrInfo in the +// BalancerAttributes field of resolver.Address. +type attributeKey struct{} + +// AddrInfo will be stored in the BalancerAttributes field of Address in order +// to use weighted roundrobin balancer. +type AddrInfo struct { + Weight uint32 +} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o any) bool { + oa, ok := o.(AddrInfo) + return ok && oa.Weight == a.Weight +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with addrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. +func GetAddrInfo(addr resolver.Address) AddrInfo { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, _ := v.(AddrInfo) + return ai +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Weight: %d", a.Weight) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go new file mode 100644 index 000000000..e40613930 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[weighted-target-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *weightedTargetBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go new file mode 100644 index 000000000..bcc8aca8b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -0,0 +1,309 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedaggregator implements state aggregator for weighted_target +// balancer. +// +// This is a separate package so it can be shared by weighted_target and eds. +// The eds balancer will be refactored to use weighted_target directly. After +// that, all functions and structs in this package can be moved to package +// weightedtarget and unexported. +package weightedaggregator + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/wrr" +) + +type weightedPickerState struct { + weight uint32 + state balancer.State + // stateToAggregate is the connectivity state used only for state + // aggregation. It could be different from state.ConnectivityState. For + // example when a sub-balancer transitions from TransientFailure to + // connecting, state.ConnectivityState is Connecting, but stateToAggregate + // is still TransientFailure. + stateToAggregate connectivity.State +} + +func (s *weightedPickerState) String() string { + return fmt.Sprintf("weight:%v,picker:%p,state:%v,stateToAggregate:%v", s.weight, s.state.Picker, s.state.ConnectivityState, s.stateToAggregate) +} + +// Aggregator is the weighted balancer state aggregator. +type Aggregator struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + newWRR func() wrr.WRR + + csEvltr *balancer.ConnectivityStateEvaluator + + mu sync.Mutex + // If started is false, no updates should be sent to the parent cc. A closed + // sub-balancer could still send pickers to this aggregator. This makes sure + // that no updates will be forwarded to parent when the whole balancer group + // and states aggregator is closed. + started bool + // All balancer IDs exist as keys in this map, even if balancer group is not + // started. + // + // If an ID is not in map, it's either removed or never added. + idToPickerState map[string]*weightedPickerState + // Set when UpdateState call propagation is paused. 
+ pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool +} + +// New creates a new weighted balancer state aggregator. +func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr.WRR) *Aggregator { + return &Aggregator{ + cc: cc, + logger: logger, + newWRR: newWRR, + csEvltr: &balancer.ConnectivityStateEvaluator{}, + idToPickerState: make(map[string]*weightedPickerState), + } +} + +// Start starts the aggregator. It can be called after Stop to restart the +// aggregator. +func (wbsa *Aggregator) Start() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.started = true +} + +// Stop stops the aggregator. When the aggregator is stopped, it won't call +// parent ClientConn to update balancer state. +func (wbsa *Aggregator) Stop() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.started = false + wbsa.clearStates() +} + +// Add adds a sub-balancer state with weight. It adds a place holder, and waits for +// the real sub-balancer to update state. +func (wbsa *Aggregator) Add(id string, weight uint32) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.idToPickerState[id] = &weightedPickerState{ + weight: weight, + // Start everything in CONNECTING, so if one of the sub-balancers + // reports TransientFailure, the RPCs will still wait for the other + // sub-balancers. + state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + stateToAggregate: connectivity.Connecting, + } + wbsa.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + + wbsa.buildAndUpdateLocked() +} + +// Remove removes the sub-balancer state. Future updates from this sub-balancer, +// if any, will be ignored. +func (wbsa *Aggregator) Remove(id string) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + if _, ok := wbsa.idToPickerState[id]; !ok { + return + } + // Setting the state of the deleted sub-balancer to Shutdown will get csEvltr + // to remove the previous state for any aggregated state evaluations. + // transitions to and from connectivity.Shutdown are ignored by csEvltr. + wbsa.csEvltr.RecordTransition(wbsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) + // Remove id and picker from picker map. This also results in future updates + // for this ID to be ignored. + delete(wbsa.idToPickerState, id) + wbsa.buildAndUpdateLocked() +} + +// UpdateWeight updates the weight for the given id. Note that this doesn't +// trigger an update to the parent ClientConn. The caller should decide when +// it's necessary, and call BuildAndUpdate. +func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + pState, ok := wbsa.idToPickerState[id] + if !ok { + return + } + pState.weight = newWeight +} + +// PauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (wbsa *Aggregator) PauseStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = true + wbsa.needUpdateStateOnResume = false +} + +// ResumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. 
+func (wbsa *Aggregator) ResumeStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = false + if wbsa.needUpdateStateOnResume { + wbsa.cc.UpdateState(wbsa.build()) + } +} + +// NeedUpdateStateOnResume sets the UpdateStateOnResume bool to true, letting a +// picker update be sent once ResumeStateUpdates is called. +func (wbsa *Aggregator) NeedUpdateStateOnResume() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.needUpdateStateOnResume = true +} + +// UpdateState is called to report a balancer state change from sub-balancer. +// It's usually called by the balancer group. +// +// It calls parent ClientConn's UpdateState with the new aggregated state. +func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + state, ok := wbsa.idToPickerState[id] + if !ok { + // All state starts with an entry in pickStateMap. If ID is not in map, + // it's either removed, or never existed. + return + } + + if !(state.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { + // If old state is TransientFailure, and new state is Connecting, don't + // update the state, to prevent the aggregated state from being always + // CONNECTING. Otherwise, stateToAggregate is the same as + // state.ConnectivityState. + wbsa.csEvltr.RecordTransition(state.stateToAggregate, newState.ConnectivityState) + state.stateToAggregate = newState.ConnectivityState + } + state.state = newState + + wbsa.buildAndUpdateLocked() +} + +// clearState Reset everything to init state (Connecting) but keep the entry in +// map (to keep the weight). +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) clearStates() { + for _, pState := range wbsa.idToPickerState { + pState.state = balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + } + pState.stateToAggregate = connectivity.Connecting + } +} + +// buildAndUpdateLocked aggregates the connectivity states of the sub-balancers, +// builds a new picker and sends an update to the parent ClientConn. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) buildAndUpdateLocked() { + if !wbsa.started { + return + } + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + + wbsa.cc.UpdateState(wbsa.build()) +} + +// build combines sub-states into one. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) build() balancer.State { + wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) + + // Make sure picker's return error is consistent with the aggregatedState. + pickers := make([]weightedPickerState, 0, len(wbsa.idToPickerState)) + + switch aggState := wbsa.csEvltr.CurrentState(); aggState { + case connectivity.Connecting: + return balancer.State{ + ConnectivityState: aggState, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)} + case connectivity.TransientFailure: + // this means that all sub-balancers are now in TransientFailure. 
+ for _, ps := range wbsa.idToPickerState { + pickers = append(pickers, *ps) + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} + default: + for _, ps := range wbsa.idToPickerState { + if ps.stateToAggregate == connectivity.Ready { + pickers = append(pickers, *ps) + } + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} + } + +} + +type weightedPickerGroup struct { + w wrr.WRR +} + +// newWeightedPickerGroup takes pickers with weights, and groups them into one +// picker. +// +// Note it only takes ready pickers. The map shouldn't contain non-ready +// pickers. +func newWeightedPickerGroup(readyWeightedPickers []weightedPickerState, newWRR func() wrr.WRR) *weightedPickerGroup { + w := newWRR() + for _, ps := range readyWeightedPickers { + w.Add(ps.state.Picker, int64(ps.weight)) + } + + return &weightedPickerGroup{ + w: w, + } +} + +func (pg *weightedPickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + p, ok := pg.w.Next().(balancer.Picker) + if !ok { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + return p.Pick(info) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go new file mode 100644 index 000000000..dfd1ef26d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedtarget implements the weighted_target balancer. +// +// All APIs in this package are experimental. +package weightedtarget + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the weighted_target balancer. +const Name = "weighted_target_experimental" + +// NewRandomWRR is the WRR constructor used to pick sub-pickers from +// sub-balancers. It's to be modified in tests. 
+var NewRandomWRR = wrr.NewRandom + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &weightedTargetBalancer{} + b.logger = prefixLogger(b) + b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) + b.stateAggregator.Start() + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + b.bg.Start() + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type weightedTargetBalancer struct { + logger *grpclog.PrefixLogger + + bg *balancergroup.BalancerGroup + stateAggregator *weightedaggregator.Aggregator + + targets map[string]Target +} + +type localityKeyType string + +const localityKey = localityKeyType("locality") + +// LocalityFromResolverState returns the locality from the resolver.State +// provided, or an empty string if not present. +func LocalityFromResolverState(state resolver.State) string { + locality, _ := state.Attributes.Value(localityKey).(string) + return locality +} + +// UpdateClientConnState takes the new targets in balancer group, +// creates/deletes sub-balancers and sends them update. addresses are split into +// groups based on hierarchy path. +func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + b.stateAggregator.PauseStateUpdates() + defer b.stateAggregator.ResumeStateUpdates() + + // Remove sub-pickers and sub-balancers that are not in the new config. + for name := range b.targets { + if _, ok := newConfig.Targets[name]; !ok { + b.stateAggregator.Remove(name) + b.bg.Remove(name) + } + } + + // For sub-balancers in the new config + // - if it's new. add to balancer group, + // - if it's old, but has a new weight, update weight in balancer group. + // + // For all sub-balancers, forward the address/balancer config update. + for name, newT := range newConfig.Targets { + oldT, ok := b.targets[name] + if !ok { + // If this is a new sub-balancer, add weights to the picker map. + b.stateAggregator.Add(name, newT.Weight) + // Then add to the balancer group. + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + // Not trigger a state/picker update. Wait for the new sub-balancer + // to send its updates. + } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { + // If the child policy name is different, remove from balancer group + // and re-add. + b.stateAggregator.Remove(name) + b.bg.Remove(name) + b.stateAggregator.Add(name, newT.Weight) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + } else if newT.Weight != oldT.Weight { + // If this is an existing sub-balancer, update weight if necessary. + b.stateAggregator.UpdateWeight(name, newT.Weight) + } + + // Forwards all the update: + // - addresses are from the map after splitting with hierarchy path, + // - Top level service config and attributes are the same, + // - Balancer config comes from the targets map. 
+ // + // TODO: handle error? How to aggregate errors and return? + _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), + }, + BalancerConfig: newT.ChildPolicy.Config, + }) + } + + b.targets = newConfig.Targets + + // If the targets length is zero, it means we have removed all child + // policies from the balancer group and aggregator. + // At the start of this UpdateClientConnState() operation, a call to + // b.stateAggregator.ResumeStateUpdates() is deferred. Thus, setting the + // needUpdateStateOnResume bool to true here will ensure a new picker is + // built as part of that deferred function. Since there are now no child + // policies, the aggregated connectivity state reported form the Aggregator + // will be TRANSIENT_FAILURE. + if len(b.targets) == 0 { + b.stateAggregator.NeedUpdateStateOnResume() + } + + return nil +} + +func (b *weightedTargetBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *weightedTargetBalancer) Close() { + b.stateAggregator.Stop() + b.bg.Close() +} + +func (b *weightedTargetBalancer) ExitIdle() { + b.bg.ExitIdle() +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go new file mode 100644 index 000000000..52090cd67 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Target represents one target with the weight and the child policy. +type Target struct { + // Weight is the weight of the child policy. + Weight uint32 `json:"weight,omitempty"` + // ChildPolicy is the child policy and it's config. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// LBConfig is the balancer config for weighted_target. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Targets map[string]Target `json:"targets,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go new file mode 100644 index 000000000..11fae92ad --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/grpcsync" +) + +// Distributor makes it easy for provider implementations to furnish new key +// materials by handling synchronization between the producer and consumers of +// the key material. +// +// Provider implementations which choose to use a Distributor should do the +// following: +// - create a new Distributor using the NewDistributor() function. +// - invoke the Set() method whenever they have new key material or errors to +// report. +// - delegate to the distributor when handing calls to KeyMaterial(). +// - invoke the Stop() method when they are done using the distributor. +type Distributor struct { + // mu protects the underlying key material. + mu sync.Mutex + km *KeyMaterial + pErr error + + // ready channel to unblock KeyMaterial() invocations blocked on + // availability of key material. + ready *grpcsync.Event + // done channel to notify provider implementations and unblock any + // KeyMaterial() calls, once the Distributor is closed. + closed *grpcsync.Event +} + +// NewDistributor returns a new Distributor. +func NewDistributor() *Distributor { + return &Distributor{ + ready: grpcsync.NewEvent(), + closed: grpcsync.NewEvent(), + } +} + +// Set updates the key material in the distributor with km. +// +// Provider implementations which use the distributor must not modify the +// contents of the KeyMaterial struct pointed to by km. +// +// A non-nil err value indicates the error that the provider implementation ran +// into when trying to fetch key material, and makes it possible to surface the +// error to the user. A non-nil error value passed here causes distributor's +// KeyMaterial() method to return nil key material. +func (d *Distributor) Set(km *KeyMaterial, err error) { + d.mu.Lock() + d.km = km + d.pErr = err + if err != nil { + // If a non-nil err is passed, we ignore the key material being passed. + d.km = nil + } + d.ready.Fire() + d.mu.Unlock() +} + +// KeyMaterial returns the most recent key material provided to the Distributor. +// If no key material was provided at the time of this call, it will block until +// the deadline on the context expires or fresh key material arrives. 
+func (d *Distributor) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { + if d.closed.HasFired() { + return nil, errProviderClosed + } + + if d.ready.HasFired() { + return d.keyMaterial() + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-d.closed.Done(): + return nil, errProviderClosed + case <-d.ready.Done(): + return d.keyMaterial() + } +} + +func (d *Distributor) keyMaterial() (*KeyMaterial, error) { + d.mu.Lock() + defer d.mu.Unlock() + return d.km, d.pErr +} + +// Stop turns down the distributor, releases allocated resources and fails any +// active KeyMaterial() call waiting for new key material. +func (d *Distributor) Stop() { + d.closed.Fire() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go new file mode 100644 index 000000000..8c15baeb5 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package pemfile + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + PluginName = "file_watcher" + defaultRefreshInterval = 10 * time.Minute +) + +func init() { + certprovider.Register(&pluginBuilder{}) +} + +type pluginBuilder struct{} + +func (p *pluginBuilder) ParseConfig(c any) (*certprovider.BuildableConfig, error) { + data, ok := c.(json.RawMessage) + if !ok { + return nil, fmt.Errorf("meshca: unsupported config type: %T", c) + } + opts, err := pluginConfigFromJSON(data) + if err != nil { + return nil, err + } + return certprovider.NewBuildableConfig(PluginName, opts.canonical(), func(certprovider.BuildOptions) certprovider.Provider { + return newProvider(opts) + }), nil +} + +func (p *pluginBuilder) Name() string { + return PluginName +} + +func pluginConfigFromJSON(jd json.RawMessage) (Options, error) { + // The only difference between this anonymous struct and the Options struct + // is that the refresh_interval is represented here as a duration proto, + // while in the latter a time.Duration is used. + cfg := &struct { + CertificateFile string `json:"certificate_file,omitempty"` + PrivateKeyFile string `json:"private_key_file,omitempty"` + CACertificateFile string `json:"ca_certificate_file,omitempty"` + RefreshInterval json.RawMessage `json:"refresh_interval,omitempty"` + }{} + if err := json.Unmarshal(jd, cfg); err != nil { + return Options{}, fmt.Errorf("pemfile: json.Unmarshal(%s) failed: %v", string(jd), err) + } + + opts := Options{ + CertFile: cfg.CertificateFile, + KeyFile: cfg.PrivateKeyFile, + RootFile: cfg.CACertificateFile, + // Refresh interval is the only field in the configuration for which we + // support a default value. 
We cannot possibly have valid defaults for + // file paths to watch. Also, it is valid to specify an empty path for + // some of those fields if the user does not want to watch them. + RefreshDuration: defaultRefreshInterval, + } + if cfg.RefreshInterval != nil { + dur := &durationpb.Duration{} + if err := protojson.Unmarshal(cfg.RefreshInterval, dur); err != nil { + return Options{}, fmt.Errorf("pemfile: protojson.Unmarshal(%+v) failed: %v", cfg.RefreshInterval, err) + } + opts.RefreshDuration = dur.AsDuration() + } + + if err := opts.validate(); err != nil { + return Options{}, err + } + return opts, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go new file mode 100644 index 000000000..7ed5c53ba --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pemfile provides a file watching certificate provider plugin +// implementation which works for files with PEM contents. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package pemfile + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/grpclog" +) + +const defaultCertRefreshDuration = 1 * time.Hour + +var ( + // For overriding from unit tests. + newDistributor = func() distributor { return certprovider.NewDistributor() } + + logger = grpclog.Component("pemfile") +) + +// Options configures a certificate provider plugin that watches a specified set +// of files that contain certificates and keys in PEM format. +type Options struct { + // CertFile is the file that holds the identity certificate. + // Optional. If this is set, KeyFile must also be set. + CertFile string + // KeyFile is the file that holds identity private key. + // Optional. If this is set, CertFile must also be set. + KeyFile string + // RootFile is the file that holds trusted root certificate(s). + // Optional. + RootFile string + // RefreshDuration is the amount of time the plugin waits before checking + // for updates in the specified files. + // Optional. If not set, a default value (1 hour) will be used. 
+ RefreshDuration time.Duration +} + +func (o Options) canonical() []byte { + return []byte(fmt.Sprintf("%s:%s:%s:%s", o.CertFile, o.KeyFile, o.RootFile, o.RefreshDuration)) +} + +func (o Options) validate() error { + if o.CertFile == "" && o.KeyFile == "" && o.RootFile == "" { + return fmt.Errorf("pemfile: at least one credential file needs to be specified") + } + if keySpecified, certSpecified := o.KeyFile != "", o.CertFile != ""; keySpecified != certSpecified { + return fmt.Errorf("pemfile: private key file and identity cert file should be both specified or not specified") + } + // C-core has a limitation that they cannot verify that a certificate file + // matches a key file. So, the only way to get around this is to make sure + // that both files are in the same directory and that they do an atomic + // read. Even though Java/Go do not have this limitation, we want the + // overall plugin behavior to be consistent across languages. + if certDir, keyDir := filepath.Dir(o.CertFile), filepath.Dir(o.KeyFile); certDir != keyDir { + return errors.New("pemfile: certificate and key file must be in the same directory") + } + return nil +} + +// NewProvider returns a new certificate provider plugin that is configured to +// watch the PEM files specified in the passed in options. +func NewProvider(o Options) (certprovider.Provider, error) { + if err := o.validate(); err != nil { + return nil, err + } + return newProvider(o), nil +} + +// newProvider is used to create a new certificate provider plugin after +// validating the options, and hence does not return an error. +func newProvider(o Options) certprovider.Provider { + if o.RefreshDuration == 0 { + o.RefreshDuration = defaultCertRefreshDuration + } + + provider := &watcher{opts: o} + if o.CertFile != "" && o.KeyFile != "" { + provider.identityDistributor = newDistributor() + } + if o.RootFile != "" { + provider.rootDistributor = newDistributor() + } + + ctx, cancel := context.WithCancel(context.Background()) + provider.cancel = cancel + go provider.run(ctx) + return provider +} + +// watcher is a certificate provider plugin that implements the +// certprovider.Provider interface. It watches a set of certificate and key +// files and provides the most up-to-date key material for consumption by +// credentials implementation. +type watcher struct { + identityDistributor distributor + rootDistributor distributor + opts Options + certFileContents []byte + keyFileContents []byte + rootFileContents []byte + cancel context.CancelFunc +} + +// distributor wraps the methods on certprovider.Distributor which are used by +// the plugin. This is very useful in tests which need to know exactly when the +// plugin updates its key material. +type distributor interface { + KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) + Set(km *certprovider.KeyMaterial, err error) + Stop() +} + +// updateIdentityDistributor checks if the cert/key files that the plugin is +// watching have changed, and if so, reads the new contents and updates the +// identityDistributor with the new key material. +// +// Skips updates when file reading or parsing fails. +// TODO(easwars): Retry with limit (on the number of retries or the amount of +// time) upon failures. 
+func (w *watcher) updateIdentityDistributor() { + if w.identityDistributor == nil { + return + } + + certFileContents, err := os.ReadFile(w.opts.CertFile) + if err != nil { + logger.Warningf("certFile (%s) read failed: %v", w.opts.CertFile, err) + return + } + keyFileContents, err := os.ReadFile(w.opts.KeyFile) + if err != nil { + logger.Warningf("keyFile (%s) read failed: %v", w.opts.KeyFile, err) + return + } + // If the file contents have not changed, skip updating the distributor. + if bytes.Equal(w.certFileContents, certFileContents) && bytes.Equal(w.keyFileContents, keyFileContents) { + return + } + + cert, err := tls.X509KeyPair(certFileContents, keyFileContents) + if err != nil { + logger.Warningf("tls.X509KeyPair(%q, %q) failed: %v", certFileContents, keyFileContents, err) + return + } + w.certFileContents = certFileContents + w.keyFileContents = keyFileContents + w.identityDistributor.Set(&certprovider.KeyMaterial{Certs: []tls.Certificate{cert}}, nil) +} + +// updateRootDistributor checks if the root cert file that the plugin is +// watching hs changed, and if so, updates the rootDistributor with the new key +// material. +// +// Skips updates when root cert reading or parsing fails. +// TODO(easwars): Retry with limit (on the number of retries or the amount of +// time) upon failures. +func (w *watcher) updateRootDistributor() { + if w.rootDistributor == nil { + return + } + + rootFileContents, err := os.ReadFile(w.opts.RootFile) + if err != nil { + logger.Warningf("rootFile (%s) read failed: %v", w.opts.RootFile, err) + return + } + trustPool := x509.NewCertPool() + if !trustPool.AppendCertsFromPEM(rootFileContents) { + logger.Warning("failed to parse root certificate") + return + } + // If the file contents have not changed, skip updating the distributor. + if bytes.Equal(w.rootFileContents, rootFileContents) { + return + } + + w.rootFileContents = rootFileContents + w.rootDistributor.Set(&certprovider.KeyMaterial{Roots: trustPool}, nil) +} + +// run is a long running goroutine which watches the configured files for +// changes, and pushes new key material into the appropriate distributors which +// is returned from calls to KeyMaterial(). +func (w *watcher) run(ctx context.Context) { + ticker := time.NewTicker(w.opts.RefreshDuration) + for { + w.updateIdentityDistributor() + w.updateRootDistributor() + select { + case <-ctx.Done(): + ticker.Stop() + if w.identityDistributor != nil { + w.identityDistributor.Stop() + } + if w.rootDistributor != nil { + w.rootDistributor.Stop() + } + return + case <-ticker.C: + } + } +} + +// KeyMaterial returns the key material sourced by the watcher. +// Callers are expected to use the returned value as read-only. +func (w *watcher) KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) { + km := &certprovider.KeyMaterial{} + if w.identityDistributor != nil { + identityKM, err := w.identityDistributor.KeyMaterial(ctx) + if err != nil { + return nil, err + } + km.Certs = identityKM.Certs + } + if w.rootDistributor != nil { + rootKM, err := w.rootDistributor.KeyMaterial(ctx) + if err != nil { + return nil, err + } + km.Roots = rootKM.Roots + } + return km, nil +} + +// Close cleans up resources allocated by the watcher. 
+func (w *watcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go new file mode 100644 index 000000000..07ba05b10 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certprovider defines APIs for Certificate Providers in gRPC. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package certprovider + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.GetCertificateProviderBuilder = getBuilder +} + +var ( + // errProviderClosed is returned by Distributor.KeyMaterial when it is + // closed. + errProviderClosed = errors.New("provider instance is closed") + + // m is a map from name to Provider builder. + m = make(map[string]Builder) +) + +// Register registers the Provider builder, whose name as returned by its Name() +// method will be used as the name registered with this builder. Registered +// Builders are used by the Store to create Providers. +func Register(b Builder) { + m[b.Name()] = b +} + +// getBuilder returns the Provider builder registered with the given name. +// If no builder is registered with the provided name, nil will be returned. +func getBuilder(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return nil +} + +// Builder creates a Provider. +type Builder interface { + // ParseConfig parses the given config, which is in a format specific to individual + // implementations, and returns a BuildableConfig on success. + ParseConfig(any) (*BuildableConfig, error) + + // Name returns the name of providers built by this builder. + Name() string +} + +// Provider makes it possible to keep channel credential implementations up to +// date with secrets that they rely on to secure communications on the +// underlying channel. +// +// Provider implementations are free to rely on local or remote sources to fetch +// the latest secrets, and free to share any state between different +// instantiations as they deem fit. +type Provider interface { + // KeyMaterial returns the key material sourced by the Provider. + // Callers are expected to use the returned value as read-only. + KeyMaterial(ctx context.Context) (*KeyMaterial, error) + + // Close cleans up resources allocated by the Provider. + Close() +} + +// KeyMaterial wraps the certificates and keys returned by a Provider instance. +type KeyMaterial struct { + // Certs contains a slice of cert/key pairs used to prove local identity. + Certs []tls.Certificate + // Roots contains the set of trusted roots to validate the peer's identity. + Roots *x509.CertPool +} + +// BuildOptions contains parameters passed to a Provider at build time. 
+type BuildOptions struct { + // CertName holds the certificate name, whose key material is of interest to + // the caller. + CertName string + // WantRoot indicates if the caller is interested in the root certificate. + WantRoot bool + // WantIdentity indicates if the caller is interested in the identity + // certificate. + WantIdentity bool +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go new file mode 100644 index 000000000..a4b99e3d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "fmt" + "sync" + "sync/atomic" +) + +// provStore is the global singleton certificate provider store. +var provStore = &store{ + providers: make(map[storeKey]*wrappedProvider), +} + +// storeKey acts as the key to the map of providers maintained by the store. A +// combination of provider name and configuration is used to uniquely identify +// every provider instance in the store. Go maps need to be indexed by +// comparable types, so the provider configuration is converted from `any` to +// `string` using the ParseConfig method while creating this key. +type storeKey struct { + // name of the certificate provider. + name string + // configuration of the certificate provider in string form. + config string + // opts contains the certificate name and other keyMaterial options. + opts BuildOptions +} + +// wrappedProvider wraps a provider instance with a reference count. +type wrappedProvider struct { + Provider + refCount int + + // A reference to the key and store are also kept here to override the + // Close method on the provider. + storeKey storeKey + store *store +} + +// closedProvider always returns errProviderClosed error. +type closedProvider struct{} + +func (c closedProvider) KeyMaterial(context.Context) (*KeyMaterial, error) { + return nil, errProviderClosed +} + +func (c closedProvider) Close() { +} + +// singleCloseWrappedProvider wraps a provider instance with a reference count +// to properly handle multiple calls to Close. +type singleCloseWrappedProvider struct { + provider atomic.Pointer[Provider] +} + +// store is a collection of provider instances, safe for concurrent access. +type store struct { + mu sync.Mutex + providers map[storeKey]*wrappedProvider +} + +// Close overrides the Close method of the embedded provider. It releases the +// reference held by the caller on the underlying provider and if the +// provider's reference count reaches zero, it is removed from the store, and +// its Close method is also invoked. 
+func (wp *wrappedProvider) Close() { + ps := wp.store + ps.mu.Lock() + defer ps.mu.Unlock() + + wp.refCount-- + if wp.refCount == 0 { + wp.Provider.Close() + delete(ps.providers, wp.storeKey) + } +} + +// Close overrides the Close method of the embedded provider to avoid release the +// already released reference. +func (w *singleCloseWrappedProvider) Close() { + newProvider := Provider(closedProvider{}) + oldProvider := w.provider.Swap(&newProvider) + (*oldProvider).Close() +} + +// KeyMaterial returns the key material sourced by the Provider. +// Callers are expected to use the returned value as read-only. +func (w *singleCloseWrappedProvider) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { + return (*w.provider.Load()).KeyMaterial(ctx) +} + +// newSingleCloseWrappedProvider create wrapper a provider instance with a reference count +// to properly handle multiple calls to Close. +func newSingleCloseWrappedProvider(provider Provider) *singleCloseWrappedProvider { + w := &singleCloseWrappedProvider{} + w.provider.Store(&provider) + return w +} + +// BuildableConfig wraps parsed provider configuration and functionality to +// instantiate provider instances. +type BuildableConfig struct { + name string + config []byte + starter func(BuildOptions) Provider + pStore *store +} + +// NewBuildableConfig creates a new BuildableConfig with the given arguments. +// Provider implementations are expected to invoke this function after parsing +// the given configuration as part of their ParseConfig() method. +// Equivalent configurations are expected to invoke this function with the same +// config argument. +func NewBuildableConfig(name string, config []byte, starter func(BuildOptions) Provider) *BuildableConfig { + return &BuildableConfig{ + name: name, + config: config, + starter: starter, + pStore: provStore, + } +} + +// Build kicks off a provider instance with the wrapped configuration. Multiple +// invocations of this method with the same opts will result in provider +// instances being reused. +func (bc *BuildableConfig) Build(opts BuildOptions) (Provider, error) { + provStore.mu.Lock() + defer provStore.mu.Unlock() + + sk := storeKey{ + name: bc.name, + config: string(bc.config), + opts: opts, + } + if wp, ok := provStore.providers[sk]; ok { + wp.refCount++ + return newSingleCloseWrappedProvider(wp), nil + } + + provider := bc.starter(opts) + if provider == nil { + return nil, fmt.Errorf("provider(%q, %q).Build(%v) failed", sk.name, sk.config, opts) + } + wp := &wrappedProvider{ + Provider: provider, + refCount: 1, + storeKey: sk, + store: provStore, + } + provStore.providers[sk] = wp + return newSingleCloseWrappedProvider(wp), nil +} + +// String returns the provider name and config as a colon separated string. +func (bc *BuildableConfig) String() string { + return fmt.Sprintf("%s:%s", bc.name, string(bc.config)) +} + +// ParseConfig is a convenience function to create a BuildableConfig given a +// provider name and configuration. Returns an error if there is no registered +// builder for the given name or if the config parsing fails. +func ParseConfig(name string, config any) (*BuildableConfig, error) { + parser := getBuilder(name) + if parser == nil { + return nil, fmt.Errorf("no certificate provider builder found for %q", name) + } + return parser.ParseConfig(config) +} + +// GetProvider is a convenience function to create a provider given the name, +// config and build options. 
+func GetProvider(name string, config any, opts BuildOptions) (Provider, error) { + bc, err := ParseConfig(name, config) + if err != nil { + return nil, err + } + return bc.Build(opts) +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 000000000..6306e8bb0 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. +func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input data modulo 2^32." +// gRPC has a max message size of 2GB so we don't need to worry about wraparound. 
+func (c *compressor) DecompressedSize(buf []byte) int { + last := len(buf) + if last < 4 { + return -1 + } + return int(binary.LittleEndian.Uint32(buf[last-4 : last])) +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/google.golang.org/grpc/internal/admin/admin.go b/vendor/google.golang.org/grpc/internal/admin/admin.go new file mode 100644 index 000000000..a9285ee74 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/admin/admin.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package admin contains internal implementation for admin service. +package admin + +import "google.golang.org/grpc" + +// services is a map from name to service register functions. +var services []func(grpc.ServiceRegistrar) (func(), error) + +// AddService adds a service to the list of admin services. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. +// +// If multiple services with the same service name are added (e.g. two services +// for `grpc.channelz.v1.Channelz`), the server will panic on `Register()`. +func AddService(f func(grpc.ServiceRegistrar) (func(), error)) { + services = append(services, f) +} + +// Register registers the set of admin services to the given server. +func Register(s grpc.ServiceRegistrar) (cleanup func(), _ error) { + var cleanups []func() + for _, f := range services { + cleanup, err := f(s) + if err != nil { + callFuncs(cleanups) + return nil, err + } + if cleanup != nil { + cleanups = append(cleanups, cleanup) + } + } + return func() { + callFuncs(cleanups) + }, nil +} + +func callFuncs(fs []func()) { + for _, f := range fs { + f() + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go b/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go new file mode 100644 index 000000000..0c96f1b81 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package nop implements a balancer with all of its balancer operations as +// no-ops, other than returning a Transient Failure Picker on a Client Conn +// update. 
+package nop + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" +) + +// bal is a balancer with all of its balancer operations as no-ops, other than +// returning a Transient Failure Picker on a Client Conn update. +type bal struct { + cc balancer.ClientConn + err error +} + +// NewBalancer returns a no-op balancer. +func NewBalancer(cc balancer.ClientConn, err error) balancer.Balancer { + return &bal{ + cc: cc, + err: err, + } +} + +// UpdateClientConnState updates the bal's Client Conn with an Error Picker +// and a Connectivity State of TRANSIENT_FAILURE. +func (b *bal) UpdateClientConnState(_ balancer.ClientConnState) error { + b.cc.UpdateState(balancer.State{ + Picker: base.NewErrPicker(b.err), + ConnectivityState: connectivity.TransientFailure, + }) + return nil +} + +// ResolverError is a no-op. +func (b *bal) ResolverError(_ error) {} + +// UpdateSubConnState is a no-op. +func (b *bal) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) {} + +// Close is a no-op. +func (b *bal) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go new file mode 100644 index 000000000..31c9cdc9d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -0,0 +1,613 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancergroup implements a utility struct to bind multiple balancers +// into one balancer. +package balancergroup + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// subBalancerWrapper is used to keep the configurations that will be used to start +// the underlying balancer. It can be called to start/stop the underlying +// balancer. +// +// When the config changes, it will pass the update to the underlying balancer +// if it exists. +// +// TODO: move to a separate file? +type subBalancerWrapper struct { + // subBalancerWrapper is passed to the sub-balancer as a ClientConn + // wrapper, only to keep the state and picker. When sub-balancer is + // restarted while in cache, the picker needs to be resent. + // + // It also contains the sub-balancer ID, so the parent balancer group can + // keep track of SubConn/pickers and the sub-balancers they belong to. Some + // of the actions are forwarded to the parent ClientConn with no change. + // Some are forward to balancer group with the sub-balancer ID. + balancer.ClientConn + id string + group *BalancerGroup + + mu sync.Mutex + state balancer.State + + // The static part of sub-balancer. Keeps balancerBuilders and addresses. 
+ // To be used when restarting sub-balancer. + builder balancer.Builder + // Options to be passed to sub-balancer at the time of creation. + buildOpts balancer.BuildOptions + // ccState is a cache of the addresses/balancer config, so when the balancer + // is restarted after close, it will get the previous update. It's a pointer + // and is set to nil at init, so when the balancer is built for the first + // time (not a restart), it won't receive an empty update. Note that this + // isn't reset to nil when the underlying balancer is closed. + ccState *balancer.ClientConnState + // The dynamic part of sub-balancer. Only used when balancer group is + // started. Gets cleared when sub-balancer is closed. + balancer *gracefulswitch.Balancer +} + +// UpdateState overrides balancer.ClientConn, to keep state and picker. +func (sbc *subBalancerWrapper) UpdateState(state balancer.State) { + sbc.mu.Lock() + sbc.state = state + sbc.group.updateBalancerState(sbc.id, state) + sbc.mu.Unlock() +} + +// NewSubConn overrides balancer.ClientConn, so balancer group can keep track of +// the relation between subconns and sub-balancers. +func (sbc *subBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return sbc.group.newSubConn(sbc, addrs, opts) +} + +func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() { + sbc.mu.Lock() + if sbc.state.Picker != nil { + sbc.group.updateBalancerState(sbc.id, sbc.state) + } + sbc.mu.Unlock() +} + +func (sbc *subBalancerWrapper) startBalancer() { + if sbc.balancer == nil { + sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) + } + sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id) + sbc.balancer.SwitchTo(sbc.builder) + if sbc.ccState != nil { + sbc.balancer.UpdateClientConnState(*sbc.ccState) + } +} + +// exitIdle invokes the sub-balancer's ExitIdle method. Returns a boolean +// indicating whether or not the operation was completed. +func (sbc *subBalancerWrapper) exitIdle() (complete bool) { + b := sbc.balancer + if b == nil { + return true + } + b.ExitIdle() + return true +} + +func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) error { + sbc.ccState = &s + b := sbc.balancer + if b == nil { + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. + return nil + } + return b.UpdateClientConnState(s) +} + +func (sbc *subBalancerWrapper) resolverError(err error) { + b := sbc.balancer + if b == nil { + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. + return + } + b.ResolverError(err) +} + +func (sbc *subBalancerWrapper) stopBalancer() { + if sbc.balancer == nil { + return + } + sbc.balancer.Close() + sbc.balancer = nil +} + +// BalancerGroup takes a list of balancers, each behind a gracefulswitch +// balancer, and make them into one balancer. 
+// +// Note that this struct doesn't implement balancer.Balancer, because it's not +// intended to be used directly as a balancer. It's expected to be used as a +// sub-balancer manager by a high level balancer. +// +// Updates from ClientConn are forwarded to sub-balancers +// - service config update +// - address update +// - subConn state change +// - find the corresponding balancer and forward +// +// Actions from sub-balances are forwarded to parent ClientConn +// - new/remove SubConn +// - picker update and health states change +// - sub-pickers are sent to an aggregator provided by the parent, which +// will group them into a group-picker. The aggregated connectivity state is +// also handled by the aggregator. +// - resolveNow +// +// Sub-balancers are only built when the balancer group is started. If the +// balancer group is closed, the sub-balancers are also closed. And it's +// guaranteed that no updates will be sent to parent ClientConn from a closed +// balancer group. +type BalancerGroup struct { + cc balancer.ClientConn + buildOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + + // stateAggregator is where the state/picker updates will be sent to. It's + // provided by the parent balancer, to build a picker with all the + // sub-pickers. + stateAggregator BalancerStateAggregator + + // outgoingMu guards all operations in the direction: + // ClientConn-->Sub-balancer. Including start, stop, resolver updates and + // SubConn state changes. + // + // The corresponding boolean outgoingStarted is used to stop further updates + // to sub-balancers after they are closed. + outgoingMu sync.Mutex + outgoingStarted bool + idToBalancerConfig map[string]*subBalancerWrapper + // Cache for sub-balancers when they are removed. This is `nil` if caching + // is disabled by passing `0` for Options.SubBalancerCloseTimeout`. + deletedBalancerCache *cache.TimeoutCache + + // incomingMu is to make sure this balancer group doesn't send updates to cc + // after it's closed. + // + // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer + // may call back to balancer group inline. It causes deadlock if they + // require the same mutex). + // + // We should never need to hold multiple locks at the same time in this + // struct. The case where two locks are held can only happen when the + // underlying balancer calls back into balancer group inline. So there's an + // implicit lock acquisition order that outgoingMu is locked before + // incomingMu. + + // incomingMu guards all operations in the direction: + // Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn. It also + // guards the map from SubConn to balancer ID, so updateSubConnState needs + // to hold it shortly to potentially delete from the map. + // + // UpdateState is called by the balancer state aggregator, and it will + // decide when and whether to call. + // + // The corresponding boolean incomingStarted is used to stop further updates + // from sub-balancers after they are closed. + incomingMu sync.Mutex + incomingStarted bool // This boolean only guards calls back to ClientConn. + scToSubBalancer map[balancer.SubConn]*subBalancerWrapper +} + +// Options wraps the arguments to be passed to the BalancerGroup ctor. +type Options struct { + // CC is a reference to the parent balancer.ClientConn. + CC balancer.ClientConn + // BuildOpts contains build options to be used when creating sub-balancers. 
+ BuildOpts balancer.BuildOptions + // StateAggregator is an implementation of the BalancerStateAggregator + // interface to aggregate picker and connectivity states from sub-balancers. + StateAggregator BalancerStateAggregator + // Logger is a group specific prefix logger. + Logger *grpclog.PrefixLogger + // SubBalancerCloseTimeout is the amount of time deleted sub-balancers spend + // in the idle cache. A value of zero here disables caching of deleted + // sub-balancers. + SubBalancerCloseTimeout time.Duration +} + +// New creates a new BalancerGroup. Note that the BalancerGroup +// needs to be started to work. +func New(opts Options) *BalancerGroup { + var bc *cache.TimeoutCache + if opts.SubBalancerCloseTimeout != time.Duration(0) { + bc = cache.NewTimeoutCache(opts.SubBalancerCloseTimeout) + } + + return &BalancerGroup{ + cc: opts.CC, + buildOpts: opts.BuildOpts, + stateAggregator: opts.StateAggregator, + logger: opts.Logger, + + deletedBalancerCache: bc, + idToBalancerConfig: make(map[string]*subBalancerWrapper), + scToSubBalancer: make(map[balancer.SubConn]*subBalancerWrapper), + } +} + +// Start starts the balancer group, including building all the sub-balancers, +// and send the existing addresses to them. +// +// A BalancerGroup can be closed and started later. When a BalancerGroup is +// closed, it can still receive address updates, which will be applied when +// restarted. +func (bg *BalancerGroup) Start() { + bg.incomingMu.Lock() + bg.incomingStarted = true + bg.incomingMu.Unlock() + + bg.outgoingMu.Lock() + if bg.outgoingStarted { + bg.outgoingMu.Unlock() + return + } + + for _, config := range bg.idToBalancerConfig { + config.startBalancer() + } + bg.outgoingStarted = true + bg.outgoingMu.Unlock() +} + +// AddWithClientConn adds a balancer with the given id to the group. The +// balancer is built with a balancer builder registered with balancerName. The +// given ClientConn is passed to the newly built balancer instead of the +// one passed to balancergroup.New(). +// +// TODO: Get rid of the existing Add() API and replace it with this. +func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { + bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id) + builder := balancer.Get(balancerName) + if builder == nil { + return fmt.Errorf("unregistered balancer name %q", balancerName) + } + + // Store data in static map, and then check to see if bg is started. + bg.outgoingMu.Lock() + defer bg.outgoingMu.Unlock() + var sbc *subBalancerWrapper + // If outgoingStarted is true, search in the cache. Otherwise, cache is + // guaranteed to be empty, searching is unnecessary. Also, skip the cache if + // caching is disabled. + if bg.outgoingStarted && bg.deletedBalancerCache != nil { + if old, ok := bg.deletedBalancerCache.Remove(id); ok { + if bg.logger.V(2) { + bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + sbc, _ = old.(*subBalancerWrapper) + if sbc != nil && sbc.builder != builder { + // If the sub-balancer in cache was built with a different + // balancer builder, don't use it, cleanup this old-balancer, + // and behave as sub-balancer is not found in cache. + // + // NOTE that this will also drop the cached addresses for this + // sub-balancer, which seems to be reasonable. 
+ sbc.stopBalancer() + // cleanupSubConns must be done before the new balancer starts, + // otherwise new SubConns created by the new balancer might be + // removed by mistake. + bg.cleanupSubConns(sbc) + sbc = nil + } + } + } + if sbc == nil { + sbc = &subBalancerWrapper{ + ClientConn: cc, + id: id, + group: bg, + builder: builder, + buildOpts: bg.buildOpts, + } + if bg.outgoingStarted { + // Only start the balancer if bg is started. Otherwise, we only keep the + // static data. + sbc.startBalancer() + } + } else { + // When brining back a sub-balancer from cache, re-send the cached + // picker and state. + sbc.updateBalancerStateWithCachedPicker() + } + bg.idToBalancerConfig[id] = sbc + return nil +} + +// Add adds a balancer built by builder to the group, with given id. +func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { + bg.AddWithClientConn(id, builder.Name(), bg.cc) +} + +// Remove removes the balancer with id from the group. +// +// But doesn't close the balancer. The balancer is kept in a cache, and will be +// closed after timeout. Cleanup work (closing sub-balancer and removing +// subconns) will be done after timeout. +func (bg *BalancerGroup) Remove(id string) { + bg.logger.Infof("Removing child policy for child %q", id) + + bg.outgoingMu.Lock() + + sbToRemove, ok := bg.idToBalancerConfig[id] + if !ok { + bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id) + bg.outgoingMu.Unlock() + return + } + + // Unconditionally remove the sub-balancer config from the map. + delete(bg.idToBalancerConfig, id) + if !bg.outgoingStarted { + // Nothing needs to be done here, since we wouldn't have created the + // sub-balancer. + bg.outgoingMu.Unlock() + return + } + + if bg.deletedBalancerCache != nil { + if bg.logger.V(2) { + bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if bg.logger.V(2) { + bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. + bg.outgoingMu.Lock() + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) + }) + bg.outgoingMu.Unlock() + return + } + + // Remove the sub-balancer with immediate effect if we are not caching. + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) +} + +// bg.remove(id) doesn't do cleanup for the sub-balancer. This function does +// cleanup after the timeout. +func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { + bg.incomingMu.Lock() + // Remove SubConns. This is only done after the balancer is + // actually closed. + // + // NOTE: if NewSubConn is called by this (closed) balancer later, the + // SubConn will be leaked. This shouldn't happen if the balancer + // implementation is correct. 
To make sure this never happens, we need to + // add another layer (balancer manager) between balancer group and the + // sub-balancers. + for sc, b := range bg.scToSubBalancer { + if b == config { + delete(bg.scToSubBalancer, sc) + } + } + bg.incomingMu.Unlock() +} + +// connect attempts to connect to all subConns belonging to sb. +func (bg *BalancerGroup) connect(sb *subBalancerWrapper) { + bg.incomingMu.Lock() + for sc, b := range bg.scToSubBalancer { + if b == sb { + sc.Connect() + } + } + bg.incomingMu.Unlock() +} + +// Following are actions from the parent grpc.ClientConn, forward to sub-balancers. + +// updateSubConnState forwards the update to cb and updates scToSubBalancer if +// needed. +func (bg *BalancerGroup) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + bg.incomingMu.Lock() + if _, ok := bg.scToSubBalancer[sc]; !ok { + bg.incomingMu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + // Only delete sc from the map when state changed to Shutdown. + delete(bg.scToSubBalancer, sc) + } + bg.incomingMu.Unlock() + + bg.outgoingMu.Lock() + if cb != nil { + cb(state) + } + bg.outgoingMu.Unlock() +} + +// UpdateSubConnState handles the state for the subconn. It finds the +// corresponding balancer and forwards the update. +func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + bg.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// UpdateClientConnState handles ClientState (including balancer config and +// addresses) from resolver. It finds the balancer and forwards the update. +func (bg *BalancerGroup) UpdateClientConnState(id string, s balancer.ClientConnState) error { + bg.outgoingMu.Lock() + defer bg.outgoingMu.Unlock() + if config, ok := bg.idToBalancerConfig[id]; ok { + return config.updateClientConnState(s) + } + return nil +} + +// ResolverError forwards resolver errors to all sub-balancers. +func (bg *BalancerGroup) ResolverError(err error) { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + config.resolverError(err) + } + bg.outgoingMu.Unlock() +} + +// Following are actions from sub-balancers, forward to ClientConn. + +// newSubConn: forward to ClientConn, and also create a map from sc to balancer, +// so state update will find the right balancer. +// +// One note about removing SubConn: only forward to ClientConn, but not delete +// from map. Delete sc from the map only when state changes to Shutdown. Since +// it's just forwarding the action, there's no need for a removeSubConn() +// wrapper function. +func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + // NOTE: if balancer with id was already removed, this should also return + // error. But since we call balancer.stopBalancer when removing the balancer, this + // shouldn't happen. 
+ bg.incomingMu.Lock() + if !bg.incomingStarted { + bg.incomingMu.Unlock() + return nil, fmt.Errorf("NewSubConn is called after balancer group is closed") + } + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) } + sc, err := bg.cc.NewSubConn(addrs, opts) + if err != nil { + bg.incomingMu.Unlock() + return nil, err + } + bg.scToSubBalancer[sc] = config + bg.incomingMu.Unlock() + return sc, nil +} + +// updateBalancerState: forward the new state to balancer state aggregator. The +// aggregator will create an aggregated picker and an aggregated connectivity +// state, then forward to ClientConn. +func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { + bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state) + + // Send new state to the aggregator, without holding the incomingMu. + // incomingMu is to protect all calls to the parent ClientConn, this update + // doesn't necessary trigger a call to ClientConn, and should already be + // protected by aggregator's mutex if necessary. + if bg.stateAggregator != nil { + bg.stateAggregator.UpdateState(id, state) + } +} + +// Close closes the balancer. It stops sub-balancers, and removes the subconns. +// The BalancerGroup can be restarted later. +func (bg *BalancerGroup) Close() { + bg.incomingMu.Lock() + if bg.incomingStarted { + bg.incomingStarted = false + // Also remove all SubConns. + for sc := range bg.scToSubBalancer { + sc.Shutdown() + delete(bg.scToSubBalancer, sc) + } + } + bg.incomingMu.Unlock() + + // Clear(true) runs clear function to close sub-balancers in cache. It + // must be called out of outgoing mutex. + if bg.deletedBalancerCache != nil { + bg.deletedBalancerCache.Clear(true) + } + + bg.outgoingMu.Lock() + if bg.outgoingStarted { + bg.outgoingStarted = false + for _, config := range bg.idToBalancerConfig { + config.stopBalancer() + } + } + bg.outgoingMu.Unlock() +} + +// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked. +// It will trigger this on all sub-balancers, or reconnect their subconns if +// not supported. +func (bg *BalancerGroup) ExitIdle() { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + if !config.exitIdle() { + bg.connect(config) + } + } + bg.outgoingMu.Unlock() +} + +// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if +// appropriate and possible. +func (bg *BalancerGroup) ExitIdleOne(id string) { + bg.outgoingMu.Lock() + if config := bg.idToBalancerConfig[id]; config != nil { + if !config.exitIdle() { + bg.connect(config) + } + } + bg.outgoingMu.Unlock() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. 
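// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// How a parent LB policy typically feeds a child: parse the child's
// "loadBalancingConfig"-style JSON with ParseConfig (defined just below) and,
// per its doc comment, hand the result to the child in the BalancerConfig
// field. The child id, the address split, and the assumption that the child
// was registered earlier with bg.Add are all invented for the example
// (e.g. rawCfg could be []byte(`[{"round_robin":{}}]`)).
func exampleForwardChildConfig(bg *BalancerGroup, id string, addrs []resolver.Address, rawCfg json.RawMessage) error {
	cfg, err := ParseConfig(rawCfg)
	if err != nil {
		return err
	}
	// Forward only this child's share of the resolver state, along with its
	// parsed config.
	return bg.UpdateClientConnState(id, balancer.ClientConnState{
		ResolverState:  resolver.State{Addresses: addrs},
		BalancerConfig: cfg,
	})
}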
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return gracefulswitch.ParseConfig(cfg) +} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go new file mode 100644 index 000000000..816869555 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancergroup + +import ( + "google.golang.org/grpc/balancer" +) + +// BalancerStateAggregator aggregates sub-picker and connectivity states into a +// state. +// +// It takes care of merging sub-picker into one picker. The picking config is +// passed directly from the parent to the aggregator implementation (instead +// via balancer group). +type BalancerStateAggregator interface { + // UpdateState updates the state of the id. + // + // It's up to the implementation whether this will trigger an update to the + // parent ClientConn. + UpdateState(id string, state balancer.State) +} diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go new file mode 100644 index 000000000..2fa487010 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go @@ -0,0 +1,151 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cache implements caches to be used in gRPC. +package cache + +import ( + "sync" + "time" +) + +type cacheEntry struct { + item any + // Note that to avoid deadlocks (potentially caused by lock ordering), + // callback can only be called without holding cache's mutex. + callback func() + timer *time.Timer + // deleted is set to true in Remove() when the call to timer.Stop() fails. + // This can happen when the timer in the cache entry fires around the same + // time that timer.stop() is called in Remove(). + deleted bool +} + +// TimeoutCache is a cache with items to be deleted after a timeout. +type TimeoutCache struct { + mu sync.Mutex + timeout time.Duration + cache map[any]*cacheEntry +} + +// NewTimeoutCache creates a TimeoutCache with the given timeout. 
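// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// Typical use of the cache, in the spirit of how the balancer group parks
// deleted sub-balancers: add an item with a cleanup callback that runs only if
// the item is still cached when the timeout fires, and take it back out with
// Remove to suppress the callback. The key and the channel are invented for
// the example.
func exampleTimeoutCache() {
	c := NewTimeoutCache(10 * time.Second)
	expired := make(chan struct{})
	c.Add("child-1", "cached child state", func() {
		// Runs only if "child-1" survives the full 10s in the cache.
		close(expired)
	})
	if _, ok := c.Remove("child-1"); ok {
		// The entry was reclaimed in time; the callback above is now
		// guaranteed not to run.
	}
}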
+func NewTimeoutCache(timeout time.Duration) *TimeoutCache { + return &TimeoutCache{ + timeout: timeout, + cache: make(map[any]*cacheEntry), + } +} + +// Add adds an item to the cache, with the specified callback to be called when +// the item is removed from the cache upon timeout. If the item is removed from +// the cache using a call to Remove before the timeout expires, the callback +// will not be called. +// +// If the Add was successful, it returns (newly added item, true). If there is +// an existing entry for the specified key, the cache entry is not be updated +// with the specified item and it returns (existing item, false). +func (c *TimeoutCache) Add(key, item any, callback func()) (any, bool) { + c.mu.Lock() + defer c.mu.Unlock() + if e, ok := c.cache[key]; ok { + return e.item, false + } + + entry := &cacheEntry{ + item: item, + callback: callback, + } + entry.timer = time.AfterFunc(c.timeout, func() { + c.mu.Lock() + if entry.deleted { + c.mu.Unlock() + // Abort the delete since this has been taken care of in Remove(). + return + } + delete(c.cache, key) + c.mu.Unlock() + entry.callback() + }) + c.cache[key] = entry + return item, true +} + +// Remove the item with the key from the cache. +// +// If the specified key exists in the cache, it returns (item associated with +// key, true) and the callback associated with the item is guaranteed to be not +// called. If the given key is not found in the cache, it returns (nil, false) +func (c *TimeoutCache) Remove(key any) (item any, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + entry, ok := c.removeInternal(key) + if !ok { + return nil, false + } + return entry.item, true +} + +// removeInternal removes and returns the item with key. +// +// caller must hold c.mu. +func (c *TimeoutCache) removeInternal(key any) (*cacheEntry, bool) { + entry, ok := c.cache[key] + if !ok { + return nil, false + } + delete(c.cache, key) + if !entry.timer.Stop() { + // If stop was not successful, the timer has fired (this can only happen + // in a race). But the deleting function is blocked on c.mu because the + // mutex was held by the caller of this function. + // + // Set deleted to true to abort the deleting function. When the lock is + // released, the delete function will acquire the lock, check the value + // of deleted and return. + entry.deleted = true + } + return entry, true +} + +// Clear removes all entries, and runs the callbacks if runCallback is true. +func (c *TimeoutCache) Clear(runCallback bool) { + var entries []*cacheEntry + c.mu.Lock() + for key := range c.cache { + if e, ok := c.removeInternal(key); ok { + entries = append(entries, e) + } + } + c.mu.Unlock() + + if !runCallback { + return + } + + // removeInternal removes entries from cache, and also stops the timer, so + // the callback is guaranteed to be not called. If runCallback is true, + // manual execute all callbacks. + for _, entry := range entries { + entry.callback() + } +} + +// Len returns the number of entries in the cache. +func (c *TimeoutCache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.cache) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go new file mode 100644 index 000000000..dcff7ad62 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds contains non-user facing functionality of the xds credentials. +package xds + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "strings" + "unsafe" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/resolver" +) + +func init() { + internal.GetXDSHandshakeInfoForTesting = GetHandshakeInfo +} + +// handshakeAttrKey is the type used as the key to store HandshakeInfo in +// the Attributes field of resolver.Address. +type handshakeAttrKey struct{} + +// Equal reports whether the handshake info structs are identical. +func (hi *HandshakeInfo) Equal(other *HandshakeInfo) bool { + if hi == nil && other == nil { + return true + } + if hi == nil || other == nil { + return false + } + if hi.rootProvider != other.rootProvider || + hi.identityProvider != other.identityProvider || + hi.requireClientCert != other.requireClientCert || + len(hi.sanMatchers) != len(other.sanMatchers) { + return false + } + for i := range hi.sanMatchers { + if !hi.sanMatchers[i].Equal(other.sanMatchers[i]) { + return false + } + } + return true +} + +// SetHandshakeInfo returns a copy of addr in which the Attributes field is +// updated with hiPtr. +func SetHandshakeInfo(addr resolver.Address, hiPtr *unsafe.Pointer) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeAttrKey{}, hiPtr) + return addr +} + +// GetHandshakeInfo returns a pointer to the *HandshakeInfo stored in attr. +func GetHandshakeInfo(attr *attributes.Attributes) *unsafe.Pointer { + v := attr.Value(handshakeAttrKey{}) + hi, _ := v.(*unsafe.Pointer) + return hi +} + +// HandshakeInfo wraps all the security configuration required by client and +// server handshake methods in xds credentials. The xDS implementation will be +// responsible for populating these fields. +type HandshakeInfo struct { + // All fields written at init time and read only after that, so no + // synchronization needed. + rootProvider certprovider.Provider + identityProvider certprovider.Provider + sanMatchers []matcher.StringMatcher // Only on the client side. + requireClientCert bool // Only on server side. +} + +// NewHandshakeInfo returns a new handshake info configured with the provided +// options. +func NewHandshakeInfo(rootProvider certprovider.Provider, identityProvider certprovider.Provider, sanMatchers []matcher.StringMatcher, requireClientCert bool) *HandshakeInfo { + return &HandshakeInfo{ + rootProvider: rootProvider, + identityProvider: identityProvider, + sanMatchers: sanMatchers, + requireClientCert: requireClientCert, + } +} + +// UseFallbackCreds returns true when fallback credentials are to be used based +// on the contents of the HandshakeInfo. 
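// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// HandshakeInfo rides on a resolver.Address as a *unsafe.Pointer so the xDS
// machinery can swap in fresh security configuration atomically without
// rewriting addresses. A consumer on the connection path reads it back roughly
// like this (the atomic load is an assumption about the caller, and
// "sync/atomic" would need to be imported):
func exampleHandshakeInfoFromAddress(addr resolver.Address) *HandshakeInfo {
	hiPtr := GetHandshakeInfo(addr.Attributes)
	if hiPtr == nil {
		return nil
	}
	return (*HandshakeInfo)(atomic.LoadPointer(hiPtr))
}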
+func (hi *HandshakeInfo) UseFallbackCreds() bool { + if hi == nil { + return true + } + return hi.identityProvider == nil && hi.rootProvider == nil +} + +// GetSANMatchersForTesting returns the SAN matchers stored in HandshakeInfo. +// To be used only for testing purposes. +func (hi *HandshakeInfo) GetSANMatchersForTesting() []matcher.StringMatcher { + return append([]matcher.StringMatcher{}, hi.sanMatchers...) +} + +// ClientSideTLSConfig constructs a tls.Config to be used in a client-side +// handshake based on the contents of the HandshakeInfo. +func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, error) { + // On the client side, rootProvider is mandatory. IdentityProvider is + // optional based on whether the client is doing TLS or mTLS. + if hi.rootProvider == nil { + return nil, errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server") + } + // Since the call to KeyMaterial() can block, we read the providers under + // the lock but call the actual function after releasing the lock. + rootProv, idProv := hi.rootProvider, hi.identityProvider + + // InsecureSkipVerify needs to be set to true because we need to perform + // custom verification to check the SAN on the received certificate. + // Currently the Go stdlib does complete verification of the cert (which + // includes hostname verification) or none. We are forced to go with the + // latter and perform the normal cert validation ourselves. + cfg := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{"h2"}, + } + + km, err := rootProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err) + } + cfg.RootCAs = km.Roots + + if idProv != nil { + km, err := idProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err) + } + cfg.Certificates = km.Certs + } + return cfg, nil +} + +// ServerSideTLSConfig constructs a tls.Config to be used in a server-side +// handshake based on the contents of the HandshakeInfo. +func (hi *HandshakeInfo) ServerSideTLSConfig(ctx context.Context) (*tls.Config, error) { + cfg := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"h2"}, + } + // On the server side, identityProvider is mandatory. RootProvider is + // optional based on whether the server is doing TLS or mTLS. + if hi.identityProvider == nil { + return nil, errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server") + } + // Since the call to KeyMaterial() can block, we read the providers under + // the lock but call the actual function after releasing the lock. + rootProv, idProv := hi.rootProvider, hi.identityProvider + if hi.requireClientCert { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + // identityProvider is mandatory on the server side. 
+ km, err := idProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err) + } + cfg.Certificates = km.Certs + + if rootProv != nil { + km, err := rootProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err) + } + cfg.ClientCAs = km.Roots + } + return cfg, nil +} + +// MatchingSANExists returns true if the SANs contained in cert match the +// criteria enforced by the list of SAN matchers in HandshakeInfo. +// +// If the list of SAN matchers in the HandshakeInfo is empty, this function +// returns true for all input certificates. +func (hi *HandshakeInfo) MatchingSANExists(cert *x509.Certificate) bool { + if len(hi.sanMatchers) == 0 { + return true + } + + // SANs can be specified in any of these four fields on the parsed cert. + for _, san := range cert.DNSNames { + if hi.matchSAN(san, true) { + return true + } + } + for _, san := range cert.EmailAddresses { + if hi.matchSAN(san, false) { + return true + } + } + for _, san := range cert.IPAddresses { + if hi.matchSAN(san.String(), false) { + return true + } + } + for _, san := range cert.URIs { + if hi.matchSAN(san.String(), false) { + return true + } + } + return false +} + +// Caller must hold mu. +func (hi *HandshakeInfo) matchSAN(san string, isDNS bool) bool { + for _, matcher := range hi.sanMatchers { + if em := matcher.ExactMatch(); em != "" && isDNS { + // This is a special case which is documented in the xDS protos. + // If the DNS SAN is a wildcard entry, and the match criteria is + // `exact`, then we need to perform DNS wildcard matching + // instead of regular string comparison. + if dnsMatch(em, san) { + return true + } + continue + } + if matcher.Match(san) { + return true + } + } + return false +} + +// dnsMatch implements a DNS wildcard matching algorithm based on RFC2828 and +// grpc-java's implementation in `OkHostnameVerifier` class. +// +// NOTE: Here the `host` argument is the one from the set of string matchers in +// the xDS proto and the `san` argument is a DNS SAN from the certificate, and +// this is the one which can potentially contain a wildcard pattern. +func dnsMatch(host, san string) bool { + // Add trailing "." and turn them into absolute domain names. + if !strings.HasSuffix(host, ".") { + host += "." + } + if !strings.HasSuffix(san, ".") { + san += "." + } + // Domain names are case-insensitive. + host = strings.ToLower(host) + san = strings.ToLower(san) + + // If san does not contain a wildcard, do exact match. + if !strings.Contains(san, "*") { + return host == san + } + + // Wildcard dns matching rules + // - '*' is only permitted in the left-most label and must be the only + // character in that label. For example, *.example.com is permitted, while + // *a.example.com, a*.example.com, a*b.example.com, a.*.example.com are + // not permitted. + // - '*' matches a single domain name component. For example, *.example.com + // matches test.example.com but does not match sub.test.example.com. + // - Wildcard patterns for single-label domain names are not permitted. + if san == "*." || !strings.HasPrefix(san, "*.") || strings.Contains(san[1:], "*") { + return false + } + // Optimization: at this point, we know that the san contains a '*' and + // is the first domain component of san. So, the host name must be at + // least as long as the san to be able to match. 
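	// [Editor's note: explanatory comments only; not part of the vendored file or this patch.]
	// Worked example for the checks below, assuming san "*.example.com." after
	// normalization:
	//   host "foo.example.com."  -> ends with ".example.com.", the prefix "foo"
	//                               has no dot, so it matches.
	//   host "a.b.example.com."  -> the prefix "a.b" contains a dot, so the
	//                               wildcard would span two labels; no match.
	//   host "example.com."      -> shorter than the san; the wildcard must
	//                               match at least one label, so no match.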
+ if len(host) < len(san) { + return false + } + // Hostname must end with the non-wildcard portion of san. + if !strings.HasSuffix(host, san[1:]) { + return false + } + // At this point we know that the hostName and san share the same suffix + // (the non-wildcard portion of san). Now, we just need to make sure + // that the '*' does not match across domain components. + hostPrefix := strings.TrimSuffix(host, san[1:]) + return !strings.Contains(hostPrefix, ".") +} diff --git a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go new file mode 100644 index 000000000..c3baac364 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package hierarchy contains functions to set and get hierarchy string from +// addresses. +// +// This package is experimental. +package hierarchy + +import ( + "google.golang.org/grpc/resolver" +) + +type pathKeyType string + +const pathKey = pathKeyType("grpc.internal.address.hierarchical_path") + +type pathValue []string + +func (p pathValue) Equal(o any) bool { + op, ok := o.(pathValue) + if !ok { + return false + } + if len(op) != len(p) { + return false + } + for i, v := range p { + if v != op[i] { + return false + } + } + return true +} + +// Get returns the hierarchical path of addr. +func Get(addr resolver.Address) []string { + attrs := addr.BalancerAttributes + if attrs == nil { + return nil + } + path, _ := attrs.Value(pathKey).(pathValue) + return ([]string)(path) +} + +// Set overrides the hierarchical path in addr with path. +func Set(addr resolver.Address, path []string) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(pathKey, pathValue(path)) + return addr +} + +// Group splits a slice of addresses into groups based on +// the first hierarchy path. The first hierarchy path will be removed from the +// result. +// +// Input: +// [ +// +// {addr0, path: [p0, wt0]} +// {addr1, path: [p0, wt1]} +// {addr2, path: [p1, wt2]} +// {addr3, path: [p1, wt3]} +// +// ] +// +// Addresses will be split into p0/p1, and the p0/p1 will be removed from the +// path. +// +// Output: +// +// { +// p0: [ +// {addr0, path: [wt0]}, +// {addr1, path: [wt1]}, +// ], +// p1: [ +// {addr2, path: [wt2]}, +// {addr3, path: [wt3]}, +// ], +// } +// +// If hierarchical path is not set, or has no path in it, the address is +// dropped. 
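// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// Round trip through Set and Group (defined just below): two addresses tagged
// with different first-level paths end up in different groups, each with the
// first path element stripped. Addresses and path names are invented for the
// example.
func exampleGroupByFirstPathElement() map[string][]resolver.Address {
	a := Set(resolver.Address{Addr: "10.0.0.1:80"}, []string{"cluster-a", "locality-1"})
	b := Set(resolver.Address{Addr: "10.0.0.2:80"}, []string{"cluster-b", "locality-2"})
	// Result:
	//   "cluster-a": [{10.0.0.1:80, path: ["locality-1"]}]
	//   "cluster-b": [{10.0.0.2:80, path: ["locality-2"]}]
	return Group([]resolver.Address{a, b})
}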
+func Group(addrs []resolver.Address) map[string][]resolver.Address { + ret := make(map[string][]resolver.Address) + for _, addr := range addrs { + oldPath := Get(addr) + if len(oldPath) == 0 { + continue + } + curPath := oldPath[0] + newPath := oldPath[1:] + newAddr := Set(addr, newPath) + ret[curPath] = append(ret[curPath], newAddr) + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go new file mode 100644 index 000000000..703091047 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -0,0 +1,387 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Possible reasons for making a request. +type RouteLookupRequest_Reason int32 + +const ( + RouteLookupRequest_REASON_UNKNOWN RouteLookupRequest_Reason = 0 // Unused + RouteLookupRequest_REASON_MISS RouteLookupRequest_Reason = 1 // No data available in local cache + RouteLookupRequest_REASON_STALE RouteLookupRequest_Reason = 2 // Data in local cache is stale +) + +// Enum value maps for RouteLookupRequest_Reason. +var ( + RouteLookupRequest_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_MISS", + 2: "REASON_STALE", + } + RouteLookupRequest_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_MISS": 1, + "REASON_STALE": 2, + } +) + +func (x RouteLookupRequest_Reason) Enum() *RouteLookupRequest_Reason { + p := new(RouteLookupRequest_Reason) + *p = x + return p +} + +func (x RouteLookupRequest_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteLookupRequest_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_lookup_v1_rls_proto_enumTypes[0].Descriptor() +} + +func (RouteLookupRequest_Reason) Type() protoreflect.EnumType { + return &file_grpc_lookup_v1_rls_proto_enumTypes[0] +} + +func (x RouteLookupRequest_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteLookupRequest_Reason.Descriptor instead. 
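// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// The kind of request an RLS client sends on a cache miss; the target type and
// key_map contents are invented for the example.
func exampleCacheMissRequest() *RouteLookupRequest {
	return &RouteLookupRequest{
		TargetType: "grpc",
		Reason:     RouteLookupRequest_REASON_MISS,
		KeyMap: map[string]string{
			"service": "helloworld.Greeter",
			"method":  "SayHello",
		},
	}
}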
+func (RouteLookupRequest_Reason) EnumDescriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0, 0} +} + +type RouteLookupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target type allows the client to specify what kind of target format it + // would like from RLS to allow it to find the regional server, e.g. "grpc". + TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + // Reason for making this request. + Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` + // For REASON_STALE, the header_data from the stale response, if any. + StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` + // Map of key values extracted via key builders for the gRPC or HTTP request. + KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *RouteLookupRequest) Reset() { + *x = RouteLookupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupRequest) ProtoMessage() {} + +func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupRequest.ProtoReflect.Descriptor instead. +func (*RouteLookupRequest) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0} +} + +func (x *RouteLookupRequest) GetTargetType() string { + if x != nil { + return x.TargetType + } + return "" +} + +func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { + if x != nil { + return x.Reason + } + return RouteLookupRequest_REASON_UNKNOWN +} + +func (x *RouteLookupRequest) GetStaleHeaderData() string { + if x != nil { + return x.StaleHeaderData + } + return "" +} + +func (x *RouteLookupRequest) GetKeyMap() map[string]string { + if x != nil { + return x.KeyMap + } + return nil +} + +func (x *RouteLookupRequest) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +type RouteLookupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Prioritized list (best one first) of addressable entities to use + // for routing, using syntax requested by the request target_type. + // The targets will be tried in order until a healthy one is found. + Targets []string `protobuf:"bytes,3,rep,name=targets,proto3" json:"targets,omitempty"` + // Optional header value to pass along to AFE in the X-Google-RLS-Data header. + // Cached with "target" and sent with all requests that match the request key. 
+ // Allows the RLS to pass its work product to the eventual target. + HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *RouteLookupResponse) Reset() { + *x = RouteLookupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupResponse) ProtoMessage() {} + +func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupResponse.ProtoReflect.Descriptor instead. +func (*RouteLookupResponse) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{1} +} + +func (x *RouteLookupResponse) GetTargets() []string { + if x != nil { + return x.Targets + } + return nil +} + +func (x *RouteLookupResponse) GetHeaderData() string { + if x != nil { + return x.HeaderData + } + return "" +} + +func (x *RouteLookupResponse) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, + 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x06, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0x94, 0x01, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_proto_rawDescOnce sync.Once + 
file_grpc_lookup_v1_rls_proto_rawDescData = file_grpc_lookup_v1_rls_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_grpc_lookup_v1_rls_proto_goTypes = []any{ + (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason + (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest + (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse + nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ + 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason + 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + 4, // 2: grpc.lookup.v1.RouteLookupRequest.extensions:type_name -> google.protobuf.Any + 4, // 3: grpc.lookup.v1.RouteLookupResponse.extensions:type_name -> google.protobuf.Any + 1, // 4: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 5: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_proto_init() } +func file_grpc_lookup_v1_rls_proto_init() { + if File_grpc_lookup_v1_rls_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_lookup_v1_rls_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_proto_depIdxs, + EnumInfos: file_grpc_lookup_v1_rls_proto_enumTypes, + MessageInfos: file_grpc_lookup_v1_rls_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_proto = out.File + file_grpc_lookup_v1_rls_proto_rawDesc = nil + file_grpc_lookup_v1_rls_proto_goTypes = nil + file_grpc_lookup_v1_rls_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go new file mode 100644 index 000000000..a0be3c8cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -0,0 +1,949 @@ +// 
Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/lookup/v1/rls_config.proto + +package grpc_lookup_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Extract a key based on a given name (e.g. header name or query parameter +// name). The name must match one of the names listed in the "name" field. If +// the "required_match" field is true, one of the specified names must be +// present for the keybuilder to match. +type NameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name that will be used in the RLS key_map to refer to this value. + // If required_match is true, you may omit this field or set it to an empty + // string, in which case the matcher will require a match, but won't update + // the key_map. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Ordered list of names (headers or query parameter names) that can supply + // this value; the first one with a non-empty value is used. + Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` + // If true, make this extraction required; the key builder will not match + // if no value is found. + RequiredMatch bool `protobuf:"varint,3,opt,name=required_match,json=requiredMatch,proto3" json:"required_match,omitempty"` +} + +func (x *NameMatcher) Reset() { + *x = NameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NameMatcher) ProtoMessage() {} + +func (x *NameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NameMatcher.ProtoReflect.Descriptor instead. 
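// [Editor's illustrative sketch: not part of the vendored file or this patch.]
// A matcher that fills the "user" key in the RLS key_map from whichever of the
// listed headers appears first; the header names are invented for the example.
var exampleUserKeyMatcher = &NameMatcher{
	Key:           "user",
	Names:         []string{"x-user-id", "x-goog-user"},
	RequiredMatch: false,
}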
+func (*NameMatcher) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{0} +} + +func (x *NameMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *NameMatcher) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +func (x *NameMatcher) GetRequiredMatch() bool { + if x != nil { + return x.RequiredMatch + } + return false +} + +// A GrpcKeyBuilder applies to a given gRPC service, name, and headers. +type GrpcKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names []*GrpcKeyBuilder_Name `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + ExtraKeys *GrpcKeyBuilder_ExtraKeys `protobuf:"bytes,3,opt,name=extra_keys,json=extraKeys,proto3" json:"extra_keys,omitempty"` + // Extract keys from all listed headers. + // For gRPC, it is an error to specify "required_match" on the NameMatcher + // protos. + Headers []*NameMatcher `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing the actual method, but need to + // separately cache and request all the matched methods. + ConstantKeys map[string]string `protobuf:"bytes,4,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GrpcKeyBuilder) Reset() { + *x = GrpcKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder) ProtoMessage() {} + +func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1} +} + +func (x *GrpcKeyBuilder) GetNames() []*GrpcKeyBuilder_Name { + if x != nil { + return x.Names + } + return nil +} + +func (x *GrpcKeyBuilder) GetExtraKeys() *GrpcKeyBuilder_ExtraKeys { + if x != nil { + return x.ExtraKeys + } + return nil +} + +func (x *GrpcKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +// An HttpKeyBuilder applies to a given HTTP URL and headers. +// +// Path and host patterns use the matching syntax from gRPC transcoding to +// extract named key/value pairs from the path and host components of the URL: +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto +// +// It is invalid to specify the same key name in multiple places in a pattern. 
+// +// For a service where the project id can be expressed either as a subdomain or +// in the path, separate HttpKeyBuilders must be used: +// +// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' +// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// +// If the host is exactly 'example.com', the first path segment will be used as +// the id and the second segment as the object. If the host has a subdomain, the +// subdomain will be used as the id and the first segment as the object. If +// neither pattern matches, no keys will be extracted. +type HttpKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // host_pattern is an ordered list of host template patterns for the desired + // value. If any host_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A host + // consists of labels separated by dots. Each label is matched against the + // label in the pattern as follows: + // - "*": Matches any single label. + // - "**": Matches zero or more labels (first or last part of host only). + // - "{=...}": One or more label capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single label capture. Identical to {=*}. + // + // Examples: + // - "example.com": Only applies to the exact host example.com. + // - "*.example.com": Matches subdomains of example.com. + // - "**.example.com": matches example.com, and all levels of subdomains. + // - "{project}.example.com": Extracts the third level subdomain. + // - "{project=**}.example.com": Extracts the third level+ subdomains. + // - "{project=**}": Extracts the entire host. + HostPatterns []string `protobuf:"bytes,1,rep,name=host_patterns,json=hostPatterns,proto3" json:"host_patterns,omitempty"` + // path_pattern is an ordered list of path template patterns for the desired + // value. If any path_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A path + // consists of segments separated by slashes. Each segment is matched against + // the segment in the pattern as follows: + // - "*": Matches any single segment. + // - "**": Matches zero or more segments (first or last part of path only). + // - "{=...}": One or more segment capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single segment capture. Identical to {=*}. + // + // A custom method may also be specified by appending ":" and the custom + // method name or "*" to indicate any custom method (including no custom + // method). For example, "/*/projects/{project_id}/**:*" extracts + // `{project_id}` for any version, resource and custom method that includes + // it. By default, any custom method will be matched. + // + // Examples: + // - "/v1/{name=messages/*}": extracts a name like "messages/12345". + // - "/v1/messages/{message_id}": extracts a message_id like "12345". + // - "/v1/users/{user_id}/messages/{message_id}": extracts two key values. + PathPatterns []string `protobuf:"bytes,2,rep,name=path_patterns,json=pathPatterns,proto3" json:"path_patterns,omitempty"` + // List of query parameter names to try to match. + // For example: ["parent", "name", "resource.name"] + // We extract all the specified query_parameters (case-sensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. 
If a given parameter appears multiple times (?foo=a&foo=b) we + // will report it as a comma-separated string (foo=a,b). + QueryParameters []*NameMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` + // List of headers to try to match. + // We extract all the specified header values (case-insensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. If a given header appears multiple times in the request we will + // report it as a comma-separated string, in standard HTTP fashion. + Headers []*NameMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing a lot of information from the URL, but + // need to separately cache and request URLs with that content. + ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the HTTP method/verb will be extracted under this key name. + Method string `protobuf:"bytes,6,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *HttpKeyBuilder) Reset() { + *x = HttpKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpKeyBuilder) ProtoMessage() {} + +func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpKeyBuilder.ProtoReflect.Descriptor instead. +func (*HttpKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpKeyBuilder) GetHostPatterns() []string { + if x != nil { + return x.HostPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetPathPatterns() []string { + if x != nil { + return x.PathPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetQueryParameters() []*NameMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +func (x *HttpKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +func (x *HttpKeyBuilder) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +type RouteLookupConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Ordered specifications for constructing keys for HTTP requests. Last + // match wins. If no HttpKeyBuilder matches, an empty key_map will be sent to + // the lookup service; it should likely reply with a global default route + // and raise an alert. 
+ HttpKeybuilders []*HttpKeyBuilder `protobuf:"bytes,1,rep,name=http_keybuilders,json=httpKeybuilders,proto3" json:"http_keybuilders,omitempty"` + // Unordered specifications for constructing keys for gRPC requests. All + // GrpcKeyBuilders on this list must have unique "name" fields so that the + // client is free to prebuild a hash map keyed by name. If no GrpcKeyBuilder + // matches, an empty key_map will be sent to the lookup service; it should + // likely reply with a global default route and raise an alert. + GrpcKeybuilders []*GrpcKeyBuilder `protobuf:"bytes,2,rep,name=grpc_keybuilders,json=grpcKeybuilders,proto3" json:"grpc_keybuilders,omitempty"` + // The name of the lookup service as a gRPC URI. Typically, this will be + // a subdomain of the target, such as "lookup.datastore.googleapis.com". + LookupService string `protobuf:"bytes,3,opt,name=lookup_service,json=lookupService,proto3" json:"lookup_service,omitempty"` + // Configure a timeout value for lookup service requests. + // Defaults to 10 seconds if not specified. + LookupServiceTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=lookup_service_timeout,json=lookupServiceTimeout,proto3" json:"lookup_service_timeout,omitempty"` + // How long are responses valid for (like HTTP Cache-Control). + // If omitted or zero, the longest valid cache time is used. + // This value is clamped to 5 minutes to avoid unflushable bad responses. + MaxAge *durationpb.Duration `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // After a response has been in the client cache for this amount of time + // and is re-requested, start an asynchronous RPC to re-validate it. + // This value should be less than max_age by at least the length of a + // typical RTT to the Route Lookup Service to fully mask the RTT latency. + // If omitted, keys are only re-requested after they have expired. + StaleAge *durationpb.Duration `protobuf:"bytes,6,opt,name=stale_age,json=staleAge,proto3" json:"stale_age,omitempty"` + // Rough indicator of amount of memory to use for the client cache. Some of + // the data structure overhead is not accounted for, so actual memory consumed + // will be somewhat greater than this value. If this field is omitted or set + // to zero, a client default will be used. The value may be capped to a lower + // amount based on client configuration. + CacheSizeBytes int64 `protobuf:"varint,7,opt,name=cache_size_bytes,json=cacheSizeBytes,proto3" json:"cache_size_bytes,omitempty"` + // This is a list of all the possible targets that can be returned by the + // lookup service. If a target not on this list is returned, it will be + // treated the same as an unhealthy target. + ValidTargets []string `protobuf:"bytes,8,rep,name=valid_targets,json=validTargets,proto3" json:"valid_targets,omitempty"` + // This value provides a default target to use if needed. If set, it will be + // used if RLS returns an error, times out, or returns an invalid response. + // Note that requests can be routed only to a subdomain of the original + // target, e.g. "us_east_1.cloudbigtable.googleapis.com". 
+ DefaultTarget string `protobuf:"bytes,9,opt,name=default_target,json=defaultTarget,proto3" json:"default_target,omitempty"` +} + +func (x *RouteLookupConfig) Reset() { + *x = RouteLookupConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupConfig) ProtoMessage() {} + +func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupConfig.ProtoReflect.Descriptor instead. +func (*RouteLookupConfig) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{3} +} + +func (x *RouteLookupConfig) GetHttpKeybuilders() []*HttpKeyBuilder { + if x != nil { + return x.HttpKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetGrpcKeybuilders() []*GrpcKeyBuilder { + if x != nil { + return x.GrpcKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetLookupService() string { + if x != nil { + return x.LookupService + } + return "" +} + +func (x *RouteLookupConfig) GetLookupServiceTimeout() *durationpb.Duration { + if x != nil { + return x.LookupServiceTimeout + } + return nil +} + +func (x *RouteLookupConfig) GetMaxAge() *durationpb.Duration { + if x != nil { + return x.MaxAge + } + return nil +} + +func (x *RouteLookupConfig) GetStaleAge() *durationpb.Duration { + if x != nil { + return x.StaleAge + } + return nil +} + +func (x *RouteLookupConfig) GetCacheSizeBytes() int64 { + if x != nil { + return x.CacheSizeBytes + } + return 0 +} + +func (x *RouteLookupConfig) GetValidTargets() []string { + if x != nil { + return x.ValidTargets + } + return nil +} + +func (x *RouteLookupConfig) GetDefaultTarget() string { + if x != nil { + return x.DefaultTarget + } + return "" +} + +// RouteLookupClusterSpecifier is used in xDS to represent a cluster specifier +// plugin for RLS. +type RouteLookupClusterSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The RLS config for this cluster specifier plugin instance. + RouteLookupConfig *RouteLookupConfig `protobuf:"bytes,1,opt,name=route_lookup_config,json=routeLookupConfig,proto3" json:"route_lookup_config,omitempty"` +} + +func (x *RouteLookupClusterSpecifier) Reset() { + *x = RouteLookupClusterSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupClusterSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupClusterSpecifier) ProtoMessage() {} + +func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupClusterSpecifier.ProtoReflect.Descriptor instead. 
+func (*RouteLookupClusterSpecifier) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RouteLookupClusterSpecifier) GetRouteLookupConfig() *RouteLookupConfig { + if x != nil { + return x.RouteLookupConfig + } + return nil +} + +// To match, one of the given Name fields must match; the service and method +// fields are specified as fixed strings. The service name is required and +// includes the proto package name. The method name may be omitted, in +// which case any method on the given service is matched. +type GrpcKeyBuilder_Name struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_Name) Reset() { + *x = GrpcKeyBuilder_Name{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_Name) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_Name) ProtoMessage() {} + +func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_Name.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder_Name) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *GrpcKeyBuilder_Name) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_Name) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +// If you wish to include the host, service, or method names as keys in the +// generated RouteLookupRequest, specify key names to use in the extra_keys +// submessage. If a key name is empty, no key will be set for that value. +// If this submessage is specified, the normal host/path fields will be left +// unset in the RouteLookupRequest. We are deprecating host/path in the +// RouteLookupRequest, so services should migrate to the ExtraKeys approach. 
+type GrpcKeyBuilder_ExtraKeys struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,3,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_ExtraKeys) Reset() { + *x = GrpcKeyBuilder_ExtraKeys{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_ExtraKeys) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} + +func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_ExtraKeys.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder_ExtraKeys) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +var File_grpc_lookup_v1_rls_config_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, + 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5c, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0xf0, 0x03, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x12, 0x39, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x09, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x51, + 0x0a, 0x09, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x89, 0x03, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f, + 0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, + 0x46, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 
0x67, 0x72, 0x70, 0x63, 0x2e, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x3f, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, + 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, + 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, + 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_config_proto_rawDescOnce sync.Once + file_grpc_lookup_v1_rls_config_proto_rawDescData = file_grpc_lookup_v1_rls_config_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_config_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_config_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_config_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_grpc_lookup_v1_rls_config_proto_goTypes = []any{ + (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher + (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder + (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder + (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig + (*RouteLookupClusterSpecifier)(nil), // 4: grpc.lookup.v1.RouteLookupClusterSpecifier + (*GrpcKeyBuilder_Name)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.Name + (*GrpcKeyBuilder_ExtraKeys)(nil), // 6: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + nil, // 7: 
grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + nil, // 8: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_grpc_lookup_v1_rls_config_proto_depIdxs = []int32{ + 5, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> grpc.lookup.v1.GrpcKeyBuilder.Name + 6, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + 0, // 2: grpc.lookup.v1.GrpcKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 7, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + 0, // 4: grpc.lookup.v1.HttpKeyBuilder.query_parameters:type_name -> grpc.lookup.v1.NameMatcher + 0, // 5: grpc.lookup.v1.HttpKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 8, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + 2, // 7: grpc.lookup.v1.RouteLookupConfig.http_keybuilders:type_name -> grpc.lookup.v1.HttpKeyBuilder + 1, // 8: grpc.lookup.v1.RouteLookupConfig.grpc_keybuilders:type_name -> grpc.lookup.v1.GrpcKeyBuilder + 9, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration + 9, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration + 9, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration + 3, // 12: grpc.lookup.v1.RouteLookupClusterSpecifier.route_lookup_config:type_name -> grpc.lookup.v1.RouteLookupConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_config_proto_init() } +func file_grpc_lookup_v1_rls_config_proto_init() { + if File_grpc_lookup_v1_rls_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*NameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*HttpKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupClusterSpecifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_lookup_v1_rls_config_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_config_proto_depIdxs, + MessageInfos: file_grpc_lookup_v1_rls_config_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_config_proto = out.File + file_grpc_lookup_v1_rls_config_proto_rawDesc = nil + file_grpc_lookup_v1_rls_config_proto_goTypes = nil + file_grpc_lookup_v1_rls_config_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go new file mode 100644 index 000000000..23dcb2100 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -0,0 +1,137 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + RouteLookupService_RouteLookup_FullMethodName = "/grpc.lookup.v1.RouteLookupService/RouteLookup" +) + +// RouteLookupServiceClient is the client API for RouteLookupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RouteLookupServiceClient interface { + // Lookup returns a target for a single key. + RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) +} + +type routeLookupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupServiceClient { + return &routeLookupServiceClient{cc} +} + +func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(RouteLookupResponse) + err := c.cc.Invoke(ctx, RouteLookupService_RouteLookup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RouteLookupServiceServer is the server API for RouteLookupService service. +// All implementations must embed UnimplementedRouteLookupServiceServer +// for forward compatibility. +type RouteLookupServiceServer interface { + // Lookup returns a target for a single key. + RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) + mustEmbedUnimplementedRouteLookupServiceServer() +} + +// UnimplementedRouteLookupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRouteLookupServiceServer struct{} + +func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") +} +func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} +func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {} + +// UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RouteLookupServiceServer will +// result in compilation errors. +type UnsafeRouteLookupServiceServer interface { + mustEmbedUnimplementedRouteLookupServiceServer() +} + +func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { + // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&RouteLookupService_ServiceDesc, srv) +} + +func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RouteLookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RouteLookupService_RouteLookup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RouteLookupService_ServiceDesc is the grpc.ServiceDesc for RouteLookupService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RouteLookupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lookup.v1.RouteLookupService", + HandlerType: (*RouteLookupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RouteLookup", + Handler: _RouteLookupService_RouteLookup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/lookup/v1/rls.proto", +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86c..924ba4f36 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -616,7 +616,7 @@ func (t *transportReader) ReadHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } diff --git a/vendor/google.golang.org/grpc/internal/wrr/edf.go b/vendor/google.golang.org/grpc/internal/wrr/edf.go new file mode 100644 index 000000000..a06656f46 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/edf.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wrr + +import ( + "container/heap" + "sync" +) + +// edfWrr is a struct for EDF weighted round robin implementation. +type edfWrr struct { + lock sync.Mutex + items edfPriorityQueue + currentOrderOffset uint64 + currentTime float64 +} + +// NewEDF creates Earliest Deadline First (EDF) +// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling) implementation for weighted round robin. +// Each pick from the schedule has the earliest deadline entry selected. Entries have deadlines set +// at current time + 1 / weight, providing weighted round robin behavior with O(log n) pick time. +func NewEDF() WRR { + return &edfWrr{} +} + +// edfEntry is an internal wrapper for item that also stores weight and relative position in the queue. +type edfEntry struct { + deadline float64 + weight int64 + orderOffset uint64 + item any +} + +// edfPriorityQueue is a heap.Interface implementation for edfEntry elements. 
+type edfPriorityQueue []*edfEntry + +func (pq edfPriorityQueue) Len() int { return len(pq) } +func (pq edfPriorityQueue) Less(i, j int) bool { + return pq[i].deadline < pq[j].deadline || pq[i].deadline == pq[j].deadline && pq[i].orderOffset < pq[j].orderOffset +} +func (pq edfPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } + +func (pq *edfPriorityQueue) Push(x any) { + *pq = append(*pq, x.(*edfEntry)) +} + +func (pq *edfPriorityQueue) Pop() any { + old := *pq + *pq = old[0 : len(old)-1] + return old[len(old)-1] +} + +func (edf *edfWrr) Add(item any, weight int64) { + edf.lock.Lock() + defer edf.lock.Unlock() + entry := edfEntry{ + deadline: edf.currentTime + 1.0/float64(weight), + weight: weight, + item: item, + orderOffset: edf.currentOrderOffset, + } + edf.currentOrderOffset++ + heap.Push(&edf.items, &entry) +} + +func (edf *edfWrr) Next() any { + edf.lock.Lock() + defer edf.lock.Unlock() + if len(edf.items) == 0 { + return nil + } + item := edf.items[0] + edf.currentTime = item.deadline + item.deadline = edf.currentTime + 1.0/float64(item.weight) + heap.Fix(&edf.items, 0) + return item.item +} diff --git a/vendor/google.golang.org/grpc/internal/wrr/random.go b/vendor/google.golang.org/grpc/internal/wrr/random.go new file mode 100644 index 000000000..3f611a350 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wrr + +import ( + "fmt" + "math/rand" + "sort" +) + +// weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. +type weightedItem struct { + item any + weight int64 + accumulatedWeight int64 +} + +func (w *weightedItem) String() string { + return fmt.Sprint(*w) +} + +// randomWRR is a struct that contains weighted items implement weighted random algorithm. +type randomWRR struct { + items []*weightedItem + // Are all item's weights equal + equalWeights bool +} + +// NewRandom creates a new WRR with random. +func NewRandom() WRR { + return &randomWRR{} +} + +var randInt63n = rand.Int63n + +func (rw *randomWRR) Next() (item any) { + if len(rw.items) == 0 { + return nil + } + if rw.equalWeights { + return rw.items[randInt63n(int64(len(rw.items)))].item + } + + sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight + // Random number in [0, sumOfWeights). + randomWeight := randInt63n(sumOfWeights) + // Item's accumulated weights are in ascending order, because item's weight >= 0. 
+ // Binary search rw.items to find first item whose accumulatedWeight > randomWeight + // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight + i := sort.Search(len(rw.items), func(i int) bool { return rw.items[i].accumulatedWeight > randomWeight }) + return rw.items[i].item +} + +func (rw *randomWRR) Add(item any, weight int64) { + accumulatedWeight := weight + equalWeights := true + if len(rw.items) > 0 { + lastItem := rw.items[len(rw.items)-1] + accumulatedWeight = lastItem.accumulatedWeight + weight + equalWeights = rw.equalWeights && weight == lastItem.weight + } + rw.equalWeights = equalWeights + rItem := &weightedItem{item: item, weight: weight, accumulatedWeight: accumulatedWeight} + rw.items = append(rw.items, rItem) +} + +func (rw *randomWRR) String() string { + return fmt.Sprint(rw.items) +} diff --git a/vendor/google.golang.org/grpc/internal/wrr/wrr.go b/vendor/google.golang.org/grpc/internal/wrr/wrr.go new file mode 100644 index 000000000..91a40d735 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/wrr.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package wrr contains the interface and common implementations of wrr +// algorithms. +package wrr + +// WRR defines an interface that implements weighted round robin. +type WRR interface { + // Add adds an item with weight to the WRR set. Add must be only called + // before any calls to Next. + Add(item any, weight int64) + // Next returns the next picked item. + // + // Next needs to be thread safe. Add may not be called after any call to + // Next. + Next() any +} diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go new file mode 100644 index 000000000..8317859e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go @@ -0,0 +1,828 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to initialize certain aspects +// of an xDS client by reading a bootstrap file. 
+package bootstrap + +import ( + "bytes" + "encoding/json" + "fmt" + "maps" + "net/url" + "os" + "slices" + "strings" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/bootstrap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +const ( + serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" +) + +// For overriding in unit tests. +var bootstrapFileReadFunc = os.ReadFile + +// ChannelCreds contains the credentials to be used while communicating with an +// xDS server. It is also used to dedup servers with the same server URI. +// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. +type ChannelCreds struct { + // Type contains a unique name identifying the credentials type. The only + // supported types currently are "google_default" and "insecure". + Type string `json:"type,omitempty"` + // Config contains the JSON configuration associated with the credentials. + Config json.RawMessage `json:"config,omitempty"` +} + +// Equal reports whether cc and other are considered equal. +func (cc ChannelCreds) Equal(other ChannelCreds) bool { + return cc.Type == other.Type && bytes.Equal(cc.Config, other.Config) +} + +// String returns a string representation of the credentials. It contains the +// type and the config (if non-nil) separated by a "-". +func (cc ChannelCreds) String() string { + if cc.Config == nil { + return cc.Type + } + + // We do not expect the Marshal call to fail since we wrote to cc.Config + // after a successful unmarshalling from JSON configuration. Therefore, + // it is safe to ignore the error here. + b, _ := json.Marshal(cc.Config) + return cc.Type + "-" + string(b) +} + +// ServerConfigs represents a collection of server configurations. +type ServerConfigs []*ServerConfig + +// Equal returns true if scs equals other. +func (scs *ServerConfigs) Equal(other *ServerConfigs) bool { + if len(*scs) != len(*other) { + return false + } + for i := range *scs { + if !(*scs)[i].Equal((*other)[i]) { + return false + } + } + return true +} + +// UnmarshalJSON takes the json data (a list of server configurations) and +// unmarshals it to the struct. +func (scs *ServerConfigs) UnmarshalJSON(data []byte) error { + servers := []*ServerConfig{} + if err := json.Unmarshal(data, &servers); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data)) + } + // Only use the first server config if fallback support is disabled. + if !envconfig.XDSFallbackSupport { + if len(servers) > 1 { + servers = servers[:1] + } + } + *scs = servers + return nil +} + +// String returns a string representation of the ServerConfigs, by concatenating +// the string representations of the underlying server configs. +func (scs *ServerConfigs) String() string { + ret := "" + for i, sc := range *scs { + if i > 0 { + ret += ", " + } + ret += sc.String() + } + return ret +} + +// Authority contains configuration for an xDS control plane authority. 
+// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. +type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. + // + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"` + // XDSServers contains the list of server configurations for this authority. + XDSServers ServerConfigs `json:"xds_servers,omitempty"` +} + +// Equal returns true if a equals other. +func (a *Authority) Equal(other *Authority) bool { + switch { + case a == nil && other == nil: + return true + case (a != nil) != (other != nil): + return false + case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate: + return false + case !a.XDSServers.Equal(&other.XDSServers): + return false + } + return true +} + +// ServerConfig contains the configuration to connect to a server. +type ServerConfig struct { + serverURI string + channelCreds []ChannelCreds + serverFeatures []string + + // As part of unmarshalling the JSON config into this struct, we ensure that + // the credentials config is valid by building an instance of the specified + // credentials and store it here for easy access. + selectedCreds ChannelCreds + credsDialOption grpc.DialOption + + cleanups []func() +} + +// ServerURI returns the URI of the management server to connect to. +func (sc *ServerConfig) ServerURI() string { + return sc.serverURI +} + +// ChannelCreds returns the credentials configuration to use when communicating +// with this server. Also used to dedup servers with the same server URI. +func (sc *ServerConfig) ChannelCreds() []ChannelCreds { + return sc.channelCreds +} + +// ServerFeatures returns the list of features supported by this server. Also +// used to dedup servers with the same server URI and channel creds. +func (sc *ServerConfig) ServerFeatures() []string { + return sc.serverFeatures +} + +// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a +// feature where the xDS client can ignore resource deletions from this server, +// as described in gRFC A53. +// +// This feature controls the behavior of the xDS client when the server deletes +// a previously sent Listener or Cluster resource. If set, the xDS client will +// not invoke the watchers' OnResourceDoesNotExist() method when a resource is +// deleted, nor will it remove the existing resource value from its cache. +func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { + for _, sf := range sc.serverFeatures { + if sf == serverFeaturesIgnoreResourceDeletion { + return true + } + } + return false +} + +// CredsDialOption returns the first supported transport credentials from the +// configuration, as a dial option. 
+func (sc *ServerConfig) CredsDialOption() grpc.DialOption { + return sc.credsDialOption +} + +// Cleanups returns a collection of functions to be called when the xDS client +// for this server is closed. Allows cleaning up resources created specifically +// for this server. +func (sc *ServerConfig) Cleanups() []func() { + return sc.cleanups +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.serverURI != other.serverURI: + return false + case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }): + return false + case !slices.Equal(sc.serverFeatures, other.serverFeatures): + return false + case !sc.selectedCreds.Equal(other.selectedCreds): + return false + } + return true +} + +// String returns the string representation of the ServerConfig. +func (sc *ServerConfig) String() string { + if len(sc.serverFeatures) == 0 { + return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String()) + } + features := strings.Join(sc.serverFeatures, "-") + return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-") +} + +// The following fields correspond 1:1 with the JSON schema for ServerConfig. +type serverConfigJSON struct { + ServerURI string `json:"server_uri,omitempty"` + ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"` + ServerFeatures []string `json:"server_features,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this server config. +func (sc *ServerConfig) MarshalJSON() ([]byte, error) { + server := &serverConfigJSON{ + ServerURI: sc.serverURI, + ChannelCreds: sc.channelCreds, + ServerFeatures: sc.serverFeatures, + } + return json.Marshal(server) +} + +// UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. +func (sc *ServerConfig) UnmarshalJSON(data []byte) error { + server := serverConfigJSON{} + if err := json.Unmarshal(data, &server); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data)) + } + + sc.serverURI = server.ServerURI + sc.channelCreds = server.ChannelCreds + sc.serverFeatures = server.ServerFeatures + + for _, cc := range server.ChannelCreds { + // We stop at the first credential type that we support. + c := bootstrap.GetCredentials(cc.Type) + if c == nil { + continue + } + bundle, cancel, err := c.Build(cc.Config) + if err != nil { + return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) + } + sc.selectedCreds = cc + sc.credsDialOption = grpc.WithCredentialsBundle(bundle) + sc.cleanups = append(sc.cleanups, cancel) + break + } + if sc.serverURI == "" { + return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data)) + } + if sc.credsDialOption == nil { + return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data)) + } + return nil +} + +// ServerConfigTestingOptions specifies options for creating a new ServerConfig +// for testing purposes. +// +// # Testing-Only +type ServerConfigTestingOptions struct { + // URI is the name of the server corresponding to this server config. + URI string + // ChannelCreds contains a list of channel credentials to use when talking + // to this server. If unspecified, `insecure` credentials will be used. 
+ ChannelCreds []ChannelCreds + // ServerFeatures represents the list of features supported by this server. + ServerFeatures []string +} + +// ServerConfigForTesting creates a new ServerConfig from the passed in options, +// for testing purposes. +// +// # Testing-Only +func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) { + cc := opts.ChannelCreds + if cc == nil { + cc = []ChannelCreds{{Type: "insecure"}} + } + scInternal := &serverConfigJSON{ + ServerURI: opts.URI, + ChannelCreds: cc, + ServerFeatures: opts.ServerFeatures, + } + scJSON, err := json.Marshal(scInternal) + if err != nil { + return nil, err + } + + sc := new(ServerConfig) + if err := sc.UnmarshalJSON(scJSON); err != nil { + return nil, err + } + return sc, nil +} + +// Config is the internal representation of the bootstrap configuration provided +// to the xDS client. +type Config struct { + xDSServers ServerConfigs + cpcs map[string]certproviderNameAndConfig + serverListenerResourceNameTemplate string + clientDefaultListenerResourceNameTemplate string + authorities map[string]*Authority + node node + + // A map from certprovider instance names to parsed buildable configs. + certProviderConfigs map[string]*certprovider.BuildableConfig +} + +// XDSServers returns the top-level list of management servers to connect to, +// ordered by priority. +func (c *Config) XDSServers() ServerConfigs { + return c.xDSServers +} + +// CertProviderConfigs returns a map from certificate provider plugin instance +// name to their configuration. Callers must not modify the returned map. +func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig { + return c.certProviderConfigs +} + +// ServerListenerResourceNameTemplate returns template for the name of the +// Listener resource to subscribe to for a gRPC server. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with the IP +// and port on which the server is listening. (e.g., "0.0.0.0:8080", +// "[::]:8080"). For example, a value of "example/resource/%s" could become +// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", +// the replaced string will be %-encoded. +// +// There is no default; if unset, xDS-based server creation fails. +func (c *Config) ServerListenerResourceNameTemplate() string { + return c.serverListenerResourceNameTemplate +} + +// ClientDefaultListenerResourceNameTemplate returns a template for the name of +// the Listener resource to subscribe to for a gRPC client channel. Used only +// when the channel is created with an "xds:" URI with no authority. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with +// the service authority (i.e., the path part of the target URI +// used to create the gRPC channel). If the template starts with +// "xdstp:", the replaced string will be %-encoded. +// +// Defaults to "%s". +func (c *Config) ClientDefaultListenerResourceNameTemplate() string { + return c.clientDefaultListenerResourceNameTemplate +} + +// Authorities returns a map of authority name to corresponding configuration. +// Callers must not modify the returned map. 
+// +// This is used in the following cases: +// - A gRPC client channel is created using an "xds:" URI that includes +// an authority. +// - A gRPC client channel is created using an "xds:" URI with no +// authority, but the "client_default_listener_resource_name_template" +// field above turns it into an "xdstp:" URI. +// - A gRPC server is created and the +// "server_listener_resource_name_template" field is an "xdstp:" URI. +// +// In any of those cases, it is an error if the specified authority is +// not present in this map. +func (c *Config) Authorities() map[string]*Authority { + return c.authorities +} + +// Node returns xDS a v3 Node proto corresponding to the node field in the +// bootstrap configuration, which identifies a specific gRPC instance. +func (c *Config) Node() *v3corepb.Node { + return c.node.toProto() +} + +// Equal returns true if c equals other. +func (c *Config) Equal(other *Config) bool { + switch { + case c == nil && other == nil: + return true + case (c != nil) != (other != nil): + return false + case !c.xDSServers.Equal(&other.xDSServers): + return false + case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }): + return false + case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate: + return false + case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate: + return false + case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }): + return false + case !c.node.Equal(other.node): + return false + } + return true +} + +// String returns a string representation of the Config. +func (c *Config) String() string { + s, _ := c.MarshalJSON() + return string(s) +} + +// The following fields correspond 1:1 with the JSON schema for Config. +type configJSON struct { + XDSServers ServerConfigs `json:"xds_servers,omitempty"` + CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + Authorities map[string]*Authority `json:"authorities,omitempty"` + Node node `json:"node,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this config. +func (c *Config) MarshalJSON() ([]byte, error) { + config := &configJSON{ + XDSServers: c.xDSServers, + CertificateProviders: c.cpcs, + ServerListenerResourceNameTemplate: c.serverListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate, + Authorities: c.authorities, + Node: c.node, + } + return json.MarshalIndent(config, " ", " ") +} + +// UnmarshalJSON takes the json data (the complete bootstrap configuration) and +// unmarshals it to the struct. +func (c *Config) UnmarshalJSON(data []byte) error { + // Initialize the node field with client controlled values. This ensures + // even if the bootstrap configuration did not contain the node field, we + // will have a node field with client controlled fields alone. 
+ config := configJSON{Node: newNode()} + if err := json.Unmarshal(data, &config); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err) + } + + c.xDSServers = config.XDSServers + c.cpcs = config.CertificateProviders + c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate + c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate + c.authorities = config.Authorities + c.node = config.Node + + // Build the certificate providers configuration to ensure that it is valid. + cpcCfgs := make(map[string]*certprovider.BuildableConfig) + getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) + for instance, nameAndConfig := range c.cpcs { + name := nameAndConfig.PluginName + parser := getBuilder(nameAndConfig.PluginName) + if parser == nil { + // We ignore plugins that we do not know about. + continue + } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) + } + cpcCfgs[instance] = bc + } + c.certProviderConfigs = cpcCfgs + + // Default value of the default client listener name template is "%s". + if c.clientDefaultListenerResourceNameTemplate == "" { + c.clientDefaultListenerResourceNameTemplate = "%s" + } + if len(c.xDSServers) == 0 { + return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data)) + } + + // Post-process the authorities' client listener resource template field: + // - if set, it must start with "xdstp:///" + // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" + for name, authority := range c.authorities { + prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) + if authority.ClientListenerResourceNameTemplate == "" { + authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" + continue + } + if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { + return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + } + } + return nil +} + +// GetConfiguration returns the bootstrap configuration initialized by reading +// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents +// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the +// former is preferred. +// +// If none of the env vars are set, this function returns the fallback +// configuration if it is not nil. Else, it returns an error. +// +// This function tries to process as much of the bootstrap file as possible (in +// the presence of the errors) and may return a Config object with certain +// fields left unspecified, in which case the caller should use some sane +// defaults. 
+func GetConfiguration() (*Config, error) { + fName := envconfig.XDSBootstrapFileName + fContent := envconfig.XDSBootstrapFileContent + + if fName != "" { + if logger.V(2) { + logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName) + } + cfg, err := bootstrapFileReadFunc(fName) + if err != nil { + return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err) + } + return newConfigFromContents(cfg) + } + + if fContent != "" { + if logger.V(2) { + logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable") + } + return newConfigFromContents([]byte(fContent)) + } + + if cfg := fallbackBootstrapConfig(); cfg != nil { + if logger.V(2) { + logger.Infof("Using bootstrap contents from fallback config") + } + return cfg, nil + } + + return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) +} + +func newConfigFromContents(data []byte) (*Config, error) { + // Normalize the input configuration. + buf := bytes.Buffer{} + err := json.Indent(&buf, data, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err) + } + data = bytes.TrimSpace(buf.Bytes()) + + config := &Config{} + if err := config.UnmarshalJSON(data); err != nil { + return nil, err + } + return config, nil +} + +// ConfigOptionsForTesting specifies options for creating a new bootstrap +// configuration for testing purposes. +// +// # Testing-Only +type ConfigOptionsForTesting struct { + // Servers is the top-level xDS server configuration. It contains a list of + // server configurations. + Servers json.RawMessage + // CertificateProviders is the certificate providers configuration. + CertificateProviders map[string]json.RawMessage + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. + ServerListenerResourceNameTemplate string + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // Authorities is a list of non-default authorities. + Authorities map[string]json.RawMessage + // Node identifies the gRPC client/server node in the + // proxyless service mesh. + Node json.RawMessage +} + +// NewContentsForTesting creates a new bootstrap configuration from the passed in +// options, for testing purposes. 
+//
+// # Testing-Only
+func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) {
+	var servers ServerConfigs
+	if err := json.Unmarshal(opts.Servers, &servers); err != nil {
+		return nil, err
+	}
+	certProviders := make(map[string]certproviderNameAndConfig)
+	for k, v := range opts.CertificateProviders {
+		cp := certproviderNameAndConfig{}
+		if err := json.Unmarshal(v, &cp); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v))
+		}
+		certProviders[k] = cp
+	}
+	authorities := make(map[string]*Authority)
+	for k, v := range opts.Authorities {
+		a := &Authority{}
+		if err := json.Unmarshal(v, a); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v))
+		}
+		authorities[k] = a
+	}
+	node := newNode()
+	if err := json.Unmarshal(opts.Node, &node); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err)
+	}
+	cfgJSON := configJSON{
+		XDSServers: servers,
+		CertificateProviders: certProviders,
+		ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate,
+		ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate,
+		Authorities: authorities,
+		Node: node,
+	}
+	contents, err := json.MarshalIndent(cfgJSON, " ", " ")
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err)
+	}
+	return contents, nil
+}
+
+// NewConfigForTesting creates a new bootstrap configuration from the provided
+// contents, for testing purposes.
+//
+// # Testing-Only
+func NewConfigForTesting(contents []byte) (*Config, error) {
+	return newConfigFromContents(contents)
+}
+
+// certproviderNameAndConfig is the internal representation of
+// the `certificate_providers` field in the bootstrap configuration.
+type certproviderNameAndConfig struct {
+	PluginName string `json:"plugin_name"`
+	Config json.RawMessage `json:"config"`
+}
+
+// locality is the internal representation of the locality field within node.
+type locality struct {
+	Region string `json:"region,omitempty"`
+	Zone string `json:"zone,omitempty"`
+	SubZone string `json:"sub_zone,omitempty"`
+}
+
+func (l locality) Equal(other locality) bool {
+	return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone
+}
+
+func (l locality) isEmpty() bool {
+	return l.Equal(locality{})
+}
+
+type userAgentVersion struct {
+	UserAgentVersion string `json:"user_agent_version,omitempty"`
+}
+
+// node is the internal representation of the node field in the bootstrap
+// configuration.
+type node struct {
+	ID string `json:"id,omitempty"`
+	Cluster string `json:"cluster,omitempty"`
+	Locality locality `json:"locality,omitempty"`
+	Metadata *structpb.Struct `json:"metadata,omitempty"`
+
+	// The following fields are controlled by the client implementation and
+	// should not be unmarshaled from JSON.
+	userAgentName string
+	userAgentVersionType userAgentVersion
+	clientFeatures []string
+}
+
+// newNode is a convenience function to create a new node instance with fields
+// controlled by the client implementation set to the desired values.
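+//
+// For orientation (values are placeholders), the JSON-controlled fields of the
+// node struct above are populated from a bootstrap snippet along these lines,
+// while the unexported fields are always filled in by newNode below:
+//
+//	"node": {
+//	  "id": "example-node",
+//	  "cluster": "example-cluster",
+//	  "locality": {"region": "us-central1", "zone": "us-central1-a"}
+//	}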
+func newNode() node { + return node{ + userAgentName: gRPCUserAgentName, + userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version}, + clientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, + } +} + +func (n node) Equal(other node) bool { + switch { + case n.ID != other.ID: + return false + case n.Cluster != other.Cluster: + return false + case !n.Locality.Equal(other.Locality): + return false + case n.userAgentName != other.userAgentName: + return false + case n.userAgentVersionType != other.userAgentVersionType: + return false + } + + // Consider failures in JSON marshaling as being unable to perform the + // comparison, and hence return false. + nMetadata, err := n.Metadata.MarshalJSON() + if err != nil { + return false + } + otherMetadata, err := other.Metadata.MarshalJSON() + if err != nil { + return false + } + if !bytes.Equal(nMetadata, otherMetadata) { + return false + } + + return slices.Equal(n.clientFeatures, other.clientFeatures) +} + +func (n node) toProto() *v3corepb.Node { + return &v3corepb.Node{ + Id: n.ID, + Cluster: n.Cluster, + Locality: func() *v3corepb.Locality { + if n.Locality.isEmpty() { + return nil + } + return &v3corepb.Locality{ + Region: n.Locality.Region, + Zone: n.Locality.Zone, + SubZone: n.Locality.SubZone, + } + }(), + Metadata: proto.Clone(n.Metadata).(*structpb.Struct), + UserAgentName: n.userAgentName, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion}, + ClientFeatures: slices.Clone(n.clientFeatures), + } +} + +// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be +// used when the bootstrap environment variables are unset. +// +// The provided configuration must be valid JSON. Returns a non-nil error if +// parsing the provided configuration fails. +func SetFallbackBootstrapConfig(cfgJSON []byte) error { + config, err := newConfigFromContents(cfgJSON) + if err != nil { + return err + } + + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = config + return nil +} + +// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap +// configuration to be used when the bootstrap environment variables are unset. +// +// # Testing-Only +func UnsetFallbackBootstrapConfigForTesting() { + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = nil +} + +// fallbackBootstrapConfig returns the fallback bootstrap configuration +// that will be used by the xDS client when the bootstrap environment +// variables are unset. +func fallbackBootstrapConfig() *Config { + configMu.Lock() + defer configMu.Unlock() + return fallbackBootstrapCfg +} + +var ( + configMu sync.Mutex + fallbackBootstrapCfg *Config +) diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go new file mode 100644 index 000000000..fdd811dd8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package bootstrap + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-bootstrap] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go new file mode 100644 index 000000000..ec1a30919 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bootstrap + +import ( + "net/url" + "strings" +) + +// PopulateResourceTemplate populates the given template using the target +// string. "%s", if exists in the template, will be replaced with target. +// +// If the template starts with "xdstp:", the replaced string will be %-encoded. +// But note that "/" is not percent encoded. +func PopulateResourceTemplate(template, target string) string { + if !strings.Contains(template, "%s") { + return template + } + if strings.HasPrefix(template, "xdstp:") { + target = percentEncode(target) + } + return strings.ReplaceAll(template, "%s", target) +} + +// percentEncode percent encode t, except for "/". See the tests for examples. +func percentEncode(t string) string { + segs := strings.Split(t, "/") + for i := range segs { + segs[i] = url.PathEscape(segs[i]) + } + return strings.Join(segs, "/") +} diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go new file mode 100644 index 000000000..02da3dbf3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tlscreds implements mTLS Credentials in xDS Bootstrap File. +// See gRFC A65: github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md. 
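+//
+// A "tls" channel-credentials entry consumed by this package might look like
+// the following sketch (file paths are placeholders; per A65 all three fields
+// are optional):
+//
+//	"channel_creds": [{
+//	  "type": "tls",
+//	  "config": {
+//	    "ca_certificate_file": "/certs/ca.pem",
+//	    "certificate_file": "/certs/client.pem",
+//	    "private_key_file": "/certs/client.key"
+//	  }
+//	}]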
+package tlscreds + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/credentials/tls/certprovider/pemfile" + "google.golang.org/grpc/internal/grpcsync" +) + +// bundle is an implementation of credentials.Bundle which implements mTLS +// Credentials in xDS Bootstrap File. +type bundle struct { + transportCredentials credentials.TransportCredentials +} + +// NewBundle returns a credentials.Bundle which implements mTLS Credentials in xDS +// Bootstrap File. It delegates certificate loading to a file_watcher provider +// if either client certificates or server root CA is specified. The second +// return value is a close func that should be called when the caller no longer +// needs this bundle. +// See gRFC A65: github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md +func NewBundle(jd json.RawMessage) (credentials.Bundle, func(), error) { + cfg := &struct { + CertificateFile string `json:"certificate_file"` + CACertificateFile string `json:"ca_certificate_file"` + PrivateKeyFile string `json:"private_key_file"` + }{} + + if jd != nil { + if err := json.Unmarshal(jd, cfg); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal config: %v", err) + } + } // Else the config field is absent. Treat it as an empty config. + + if cfg.CACertificateFile == "" && cfg.CertificateFile == "" && cfg.PrivateKeyFile == "" { + // We cannot use (and do not need) a file_watcher provider in this case, + // and can simply directly use the TLS transport credentials. + // Quoting A65: + // + // > The only difference between the file-watcher certificate provider + // > config and this one is that in the file-watcher certificate + // > provider, at least one of the "certificate_file" or + // > "ca_certificate_file" fields must be specified, whereas in this + // > configuration, it is acceptable to specify neither one. + return &bundle{transportCredentials: credentials.NewTLS(&tls.Config{})}, func() {}, nil + } + // Otherwise we need to use a file_watcher provider to watch the CA, + // private and public keys. + + // The pemfile plugin (file_watcher) currently ignores BuildOptions. + provider, err := certprovider.GetProvider(pemfile.PluginName, jd, certprovider.BuildOptions{}) + if err != nil { + return nil, nil, err + } + return &bundle{ + transportCredentials: &reloadingCreds{provider: provider}, + }, grpcsync.OnceFunc(func() { provider.Close() }), nil +} + +func (t *bundle) TransportCredentials() credentials.TransportCredentials { + return t.transportCredentials +} + +func (t *bundle) PerRPCCredentials() credentials.PerRPCCredentials { + // mTLS provides transport credentials only. There are no per-RPC + // credentials. + return nil +} + +func (t *bundle) NewWithMode(string) (credentials.Bundle, error) { + // This bundle has a single mode which only uses TLS transport credentials, + // so there is no legitimate case where callers would call NewWithMode. + return nil, fmt.Errorf("xDS TLS credentials only support one mode") +} + +// reloadingCreds is a credentials.TransportCredentials for client +// side mTLS that reloads the server root CA certificate and the client +// certificates from the provider on every client handshake. This is necessary +// because the standard TLS credentials do not support reloading CA +// certificates. 
+type reloadingCreds struct { + provider certprovider.Provider +} + +func (c *reloadingCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + km, err := c.provider.KeyMaterial(ctx) + if err != nil { + return nil, nil, err + } + config := &tls.Config{ + RootCAs: km.Roots, + Certificates: km.Certs, + } + return credentials.NewTLS(config).ClientHandshake(ctx, authority, rawConn) +} + +func (c *reloadingCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "tls"} +} + +func (c *reloadingCreds) Clone() credentials.TransportCredentials { + return &reloadingCreds{provider: c.provider} +} + +func (c *reloadingCreds) OverrideServerName(string) error { + return errors.New("overriding server name is not supported by xDS client TLS credentials") +} + +func (c *reloadingCreds) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, nil, errors.New("server handshake is not supported by xDS client TLS credentials") +} diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go new file mode 100644 index 000000000..01433f412 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -0,0 +1,274 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package matcher + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" +) + +// HeaderMatcher is an interface for header matchers. These are +// documented in (EnvoyProxy link here?). These matchers will match on different +// aspects of HTTP header name/value pairs. +type HeaderMatcher interface { + Match(metadata.MD) bool + String() string +} + +// mdValuesFromOutgoingCtx retrieves metadata from context. If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { + vs, ok := md[key] + if !ok { + return "", false + } + return strings.Join(vs, ","), true +} + +// HeaderExactMatcher matches on an exact match of the value of the header. +type HeaderExactMatcher struct { + key string + exact string + invert bool +} + +// NewHeaderExactMatcher returns a new HeaderExactMatcher. +func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderExactMatcher. 
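+//
+// For example (header name and value are illustrative):
+//
+//	m := NewHeaderExactMatcher("x-env", "prod", false)
+//	md := metadata.Pairs("x-env", "prod")
+//	matched := m.Match(md) // true: the comma-joined value equals "prod"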
+func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hem.key) + if !ok { + return false + } + return (v == hem.exact) != hem.invert +} + +func (hem *HeaderExactMatcher) String() string { + return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) +} + +// HeaderRegexMatcher matches on whether the entire request header value matches +// the regex. +type HeaderRegexMatcher struct { + key string + re *regexp.Regexp + invert bool +} + +// NewHeaderRegexMatcher returns a new HeaderRegexMatcher. +func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRegexMatcher. +func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + return grpcutil.FullMatchWithRegex(hrm.re, v) != hrm.invert +} + +func (hrm *HeaderRegexMatcher) String() string { + return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) +} + +// HeaderRangeMatcher matches on whether the request header value is within the +// range. The header value must be an integer in base 10 notation. +type HeaderRangeMatcher struct { + key string + start, end int64 // represents [start, end). + invert bool +} + +// NewHeaderRangeMatcher returns a new HeaderRangeMatcher. +func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRangeMatcher. +func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { + return !hrm.invert + } + return hrm.invert +} + +func (hrm *HeaderRangeMatcher) String() string { + return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) +} + +// HeaderPresentMatcher will match based on whether the header is present in the +// whole request. +type HeaderPresentMatcher struct { + key string + present bool +} + +// NewHeaderPresentMatcher returns a new HeaderPresentMatcher. +func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPresentMatcher { + if invert { + present = !present + } + return &HeaderPresentMatcher{key: key, present: present} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPresentMatcher. +func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) + present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? + return present == hpm.present +} + +func (hpm *HeaderPresentMatcher) String() string { + return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) +} + +// HeaderPrefixMatcher matches on whether the prefix of the header value matches +// the prefix passed into this struct. +type HeaderPrefixMatcher struct { + key string + prefix string + invert bool +} + +// NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. 
+func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefixMatcher {
+	return &HeaderPrefixMatcher{key: key, prefix: prefix, invert: invert}
+}
+
+// Match returns whether the passed in HTTP Headers match according to the
+// HeaderPrefixMatcher.
+func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool {
+	v, ok := mdValuesFromOutgoingCtx(md, hpm.key)
+	if !ok {
+		return false
+	}
+	return strings.HasPrefix(v, hpm.prefix) != hpm.invert
+}
+
+func (hpm *HeaderPrefixMatcher) String() string {
+	return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix)
+}
+
+// HeaderSuffixMatcher matches on whether the suffix of the header value matches
+// the suffix passed into this struct.
+type HeaderSuffixMatcher struct {
+	key string
+	suffix string
+	invert bool
+}
+
+// NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher.
+func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffixMatcher {
+	return &HeaderSuffixMatcher{key: key, suffix: suffix, invert: invert}
+}
+
+// Match returns whether the passed in HTTP Headers match according to the
+// HeaderSuffixMatcher.
+func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool {
+	v, ok := mdValuesFromOutgoingCtx(md, hsm.key)
+	if !ok {
+		return false
+	}
+	return strings.HasSuffix(v, hsm.suffix) != hsm.invert
+}
+
+func (hsm *HeaderSuffixMatcher) String() string {
+	return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix)
+}
+
+// HeaderContainsMatcher matches on whether the header value contains the
+// value passed into this struct.
+type HeaderContainsMatcher struct {
+	key string
+	contains string
+	invert bool
+}
+
+// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP
+// Header key to match on, and contains is the value that the header should
+// contain for a successful match. An empty contains string does not
+// work; use HeaderPresentMatcher in that case.
+func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher {
+	return &HeaderContainsMatcher{key: key, contains: contains, invert: invert}
+}
+
+// Match returns whether the passed in HTTP Headers match according to the
+// HeaderContainsMatcher.
+func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool {
+	v, ok := mdValuesFromOutgoingCtx(md, hcm.key)
+	if !ok {
+		return false
+	}
+	return strings.Contains(v, hcm.contains) != hcm.invert
+}
+
+func (hcm *HeaderContainsMatcher) String() string {
+	return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains)
+}
+
+// HeaderStringMatcher matches on whether the header value matches against the
+// StringMatcher specified.
+type HeaderStringMatcher struct {
+	key string
+	stringMatcher StringMatcher
+	invert bool
+}
+
+// NewHeaderStringMatcher returns a new HeaderStringMatcher.
+func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderStringMatcher {
+	return &HeaderStringMatcher{
+		key: key,
+		stringMatcher: sm,
+		invert: invert,
+	}
+}
+
+// Match returns whether the passed in HTTP Headers match according to the
+// specified StringMatcher.
+func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return hsm.stringMatcher.Match(v) != hsm.invert +} + +func (hsm *HeaderStringMatcher) String() string { + return fmt.Sprintf("headerString:%v:%v", hsm.key, hsm.stringMatcher) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go new file mode 100644 index 000000000..c138f7873 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package matcher contains types that need to be shared between code under +// google.golang.org/grpc/xds/... and the rest of gRPC. +package matcher + +import ( + "errors" + "fmt" + "regexp" + "strings" + + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/grpc/internal/grpcutil" +) + +// StringMatcher contains match criteria for matching a string, and is an +// internal representation of the `StringMatcher` proto defined at +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/type/matcher/v3/string.proto. +type StringMatcher struct { + // Since these match fields are part of a `oneof` in the corresponding xDS + // proto, only one of them is expected to be set. + exactMatch *string + prefixMatch *string + suffixMatch *string + regexMatch *regexp.Regexp + containsMatch *string + // If true, indicates the exact/prefix/suffix/contains matching should be + // case insensitive. This has no effect on the regex match. + ignoreCase bool +} + +// Match returns true if input matches the criteria in the given StringMatcher. +func (sm StringMatcher) Match(input string) bool { + if sm.ignoreCase { + input = strings.ToLower(input) + } + switch { + case sm.exactMatch != nil: + return input == *sm.exactMatch + case sm.prefixMatch != nil: + return strings.HasPrefix(input, *sm.prefixMatch) + case sm.suffixMatch != nil: + return strings.HasSuffix(input, *sm.suffixMatch) + case sm.regexMatch != nil: + return grpcutil.FullMatchWithRegex(sm.regexMatch, input) + case sm.containsMatch != nil: + return strings.Contains(input, *sm.containsMatch) + } + return false +} + +// StringMatcherFromProto is a helper function to create a StringMatcher from +// the corresponding StringMatcher proto. +// +// Returns a non-nil error if matcherProto is invalid. 
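+//
+// For example (the SPIFFE prefix below is illustrative):
+//
+//	sm, err := StringMatcherFromProto(&v3matcherpb.StringMatcher{
+//		MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "spiffe://cluster.local/"},
+//	})
+//	// sm.Match("spiffe://cluster.local/ns/default/sa/foo") == true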
+func StringMatcherFromProto(matcherProto *v3matcherpb.StringMatcher) (StringMatcher, error) { + if matcherProto == nil { + return StringMatcher{}, errors.New("input StringMatcher proto is nil") + } + + matcher := StringMatcher{ignoreCase: matcherProto.GetIgnoreCase()} + switch mt := matcherProto.GetMatchPattern().(type) { + case *v3matcherpb.StringMatcher_Exact: + matcher.exactMatch = &mt.Exact + if matcher.ignoreCase { + *matcher.exactMatch = strings.ToLower(*matcher.exactMatch) + } + case *v3matcherpb.StringMatcher_Prefix: + if matcherProto.GetPrefix() == "" { + return StringMatcher{}, errors.New("empty prefix is not allowed in StringMatcher") + } + matcher.prefixMatch = &mt.Prefix + if matcher.ignoreCase { + *matcher.prefixMatch = strings.ToLower(*matcher.prefixMatch) + } + case *v3matcherpb.StringMatcher_Suffix: + if matcherProto.GetSuffix() == "" { + return StringMatcher{}, errors.New("empty suffix is not allowed in StringMatcher") + } + matcher.suffixMatch = &mt.Suffix + if matcher.ignoreCase { + *matcher.suffixMatch = strings.ToLower(*matcher.suffixMatch) + } + case *v3matcherpb.StringMatcher_SafeRegex: + regex := matcherProto.GetSafeRegex().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return StringMatcher{}, fmt.Errorf("safe_regex matcher %q is invalid", regex) + } + matcher.regexMatch = re + case *v3matcherpb.StringMatcher_Contains: + if matcherProto.GetContains() == "" { + return StringMatcher{}, errors.New("empty contains is not allowed in StringMatcher") + } + matcher.containsMatch = &mt.Contains + if matcher.ignoreCase { + *matcher.containsMatch = strings.ToLower(*matcher.containsMatch) + } + default: + return StringMatcher{}, fmt.Errorf("unrecognized string matcher: %+v", matcherProto) + } + return matcher, nil +} + +// StringMatcherForTesting is a helper function to create a StringMatcher based +// on the given arguments. Intended only for testing purposes. +func StringMatcherForTesting(exact, prefix, suffix, contains *string, regex *regexp.Regexp, ignoreCase bool) StringMatcher { + sm := StringMatcher{ + exactMatch: exact, + prefixMatch: prefix, + suffixMatch: suffix, + regexMatch: regex, + containsMatch: contains, + ignoreCase: ignoreCase, + } + if ignoreCase { + switch { + case sm.exactMatch != nil: + *sm.exactMatch = strings.ToLower(*exact) + case sm.prefixMatch != nil: + *sm.prefixMatch = strings.ToLower(*prefix) + case sm.suffixMatch != nil: + *sm.suffixMatch = strings.ToLower(*suffix) + case sm.containsMatch != nil: + *sm.containsMatch = strings.ToLower(*contains) + } + } + return sm +} + +// ExactMatch returns the value of the configured exact match or an empty string +// if exact match criteria was not specified. +func (sm StringMatcher) ExactMatch() string { + if sm.exactMatch != nil { + return *sm.exactMatch + } + return "" +} + +// Equal returns true if other and sm are equivalent to each other. 
+func (sm StringMatcher) Equal(other StringMatcher) bool { + if sm.ignoreCase != other.ignoreCase { + return false + } + + if (sm.exactMatch != nil) != (other.exactMatch != nil) || + (sm.prefixMatch != nil) != (other.prefixMatch != nil) || + (sm.suffixMatch != nil) != (other.suffixMatch != nil) || + (sm.regexMatch != nil) != (other.regexMatch != nil) || + (sm.containsMatch != nil) != (other.containsMatch != nil) { + return false + } + + switch { + case sm.exactMatch != nil: + return *sm.exactMatch == *other.exactMatch + case sm.prefixMatch != nil: + return *sm.prefixMatch == *other.prefixMatch + case sm.suffixMatch != nil: + return *sm.suffixMatch == *other.suffixMatch + case sm.regexMatch != nil: + return sm.regexMatch.String() == other.regexMatch.String() + case sm.containsMatch != nil: + return *sm.containsMatch == *other.containsMatch + } + return true +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go new file mode 100644 index 000000000..fb599954a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go @@ -0,0 +1,101 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { + if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { + return nil, fmt.Errorf("missing required field: TypedConfig") + } + customConfig, loggerName, err := getCustomConfig(loggerConfig.AuditLogger.TypedConfig) + if err != nil { + return nil, err + } + if loggerName == "" { + return nil, fmt.Errorf("field TypedConfig.TypeURL cannot be an empty string") + } + factory := audit.GetLoggerBuilder(loggerName) + if factory == nil { + if loggerConfig.IsOptional { + return nil, nil + } + return nil, fmt.Errorf("no builder registered for %v", loggerName) + } + auditLoggerConfig, err := factory.ParseLoggerConfig(customConfig) + if err != nil { + return nil, fmt.Errorf("custom config could not be parsed by registered factory. 
error: %v", err) + } + auditLogger := factory.Build(auditLoggerConfig) + return auditLogger, nil +} + +func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { + c, err := config.UnmarshalNew() + if err != nil { + return nil, "", err + } + switch m := c.(type) { + case *v1xdsudpatypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3xdsxdstypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3auditloggersstreampb.StdoutAuditLog: + return convertStdoutConfig(m) + } + return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) +} + +func convertStdoutConfig(config *v3auditloggersstreampb.StdoutAuditLog) (json.RawMessage, string, error) { + json, err := protojson.Marshal(config) + return json, stdout.Name, err +} + +func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + urls := strings.Split(typeURL, "/") + if len(urls) == 0 { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: typeURL must have a url-like format with the typeName being the value after the last /", typeURL, s) + } + name := urls[len(urls)-1] + + rawJSON := []byte("{}") + var err error + if s != nil { + rawJSON, err = json.Marshal(s) + if err != nil { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: %v", typeURL, s, err) + } + } + return rawJSON, name, nil +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go new file mode 100644 index 000000000..e1c15018b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -0,0 +1,432 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "errors" + "fmt" + "net" + "regexp" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3route_componentspb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + internalmatcher "google.golang.org/grpc/internal/xds/matcher" +) + +// matcher is an interface that takes data about incoming RPC's and returns +// whether it matches with whatever matcher implements this interface. +type matcher interface { + match(data *rpcData) bool +} + +// policyMatcher helps determine whether an incoming RPC call matches a policy. +// A policy is a logical role (e.g. Service Admin), which is comprised of +// permissions and principals. 
A principal is an identity (or identities) for a +// downstream subject which are assigned the policy (role), and a permission is +// an action(s) that a principal(s) can take. A policy matches if both a +// permission and a principal match, which will be determined by the child or +// permissions and principal matchers. policyMatcher implements the matcher +// interface. +type policyMatcher struct { + permissions *orMatcher + principals *orMatcher +} + +func newPolicyMatcher(policy *v3rbacpb.Policy) (*policyMatcher, error) { + permissions, err := matchersFromPermissions(policy.Permissions) + if err != nil { + return nil, err + } + principals, err := matchersFromPrincipals(policy.Principals) + if err != nil { + return nil, err + } + return &policyMatcher{ + permissions: &orMatcher{matchers: permissions}, + principals: &orMatcher{matchers: principals}, + }, nil +} + +func (pm *policyMatcher) match(data *rpcData) bool { + // A policy matches if and only if at least one of its permissions match the + // action taking place AND at least one if its principals match the + // downstream peer. + return pm.permissions.match(data) && pm.principals.match(data) +} + +// matchersFromPermissions takes a list of permissions (can also be +// a single permission, e.g. from a not matcher which is logically !permission) +// and returns a list of matchers which correspond to that permission. This will +// be called in many instances throughout the initial construction of the RBAC +// engine from the AND and OR matchers and also from the NOT matcher. +func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, error) { + var matchers []matcher + for _, permission := range permissions { + switch permission.GetRule().(type) { + case *v3rbacpb.Permission_AndRules: + mList, err := matchersFromPermissions(permission.GetAndRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &andMatcher{matchers: mList}) + case *v3rbacpb.Permission_OrRules: + mList, err := matchersFromPermissions(permission.GetOrRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &orMatcher{matchers: mList}) + case *v3rbacpb.Permission_Any: + matchers = append(matchers, &alwaysMatcher{}) + case *v3rbacpb.Permission_Header: + m, err := newHeaderMatcher(permission.GetHeader()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_UrlPath: + m, err := newURLPathMatcher(permission.GetUrlPath()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationIp: + // Due to this being on server side, the destination IP is the local + // IP. + m, err := newLocalIPMatcher(permission.GetDestinationIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationPort: + matchers = append(matchers, newPortMatcher(permission.GetDestinationPort())) + case *v3rbacpb.Permission_NotRule: + mList, err := matchersFromPermissions([]*v3rbacpb.Permission{{Rule: permission.GetNotRule().Rule}}) + if err != nil { + return nil, err + } + matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) + case *v3rbacpb.Permission_Metadata: + // Never matches - so no-op if not inverted, always match if + // inverted. 
+			if permission.GetMetadata().GetInvert() { // Test metadata being no-op and also metadata with invert always matching
+				matchers = append(matchers, &alwaysMatcher{})
+			}
+		case *v3rbacpb.Permission_RequestedServerName:
+			// Not supported in gRPC RBAC currently - a permission typed as
+			// requested server name in the initial config will be a no-op.
+		}
+	}
+	return matchers, nil
+}
+
+func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) {
+	var matchers []matcher
+	for _, principal := range principals {
+		switch principal.GetIdentifier().(type) {
+		case *v3rbacpb.Principal_AndIds:
+			mList, err := matchersFromPrincipals(principal.GetAndIds().Ids)
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, &andMatcher{matchers: mList})
+		case *v3rbacpb.Principal_OrIds:
+			mList, err := matchersFromPrincipals(principal.GetOrIds().Ids)
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, &orMatcher{matchers: mList})
+		case *v3rbacpb.Principal_Any:
+			matchers = append(matchers, &alwaysMatcher{})
+		case *v3rbacpb.Principal_Authenticated_:
+			authenticatedMatcher, err := newAuthenticatedMatcher(principal.GetAuthenticated())
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, authenticatedMatcher)
+		case *v3rbacpb.Principal_DirectRemoteIp:
+			m, err := newRemoteIPMatcher(principal.GetDirectRemoteIp())
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, m)
+		case *v3rbacpb.Principal_Header:
+			// Do we need an error here?
+			m, err := newHeaderMatcher(principal.GetHeader())
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, m)
+		case *v3rbacpb.Principal_UrlPath:
+			m, err := newURLPathMatcher(principal.GetUrlPath())
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, m)
+		case *v3rbacpb.Principal_NotId:
+			mList, err := matchersFromPrincipals([]*v3rbacpb.Principal{{Identifier: principal.GetNotId().Identifier}})
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, &notMatcher{matcherToNot: mList[0]})
+		case *v3rbacpb.Principal_SourceIp:
+			// The source ip principal identifier is deprecated. Thus, a
+			// principal typed as a source ip in the identifier will be a no-op.
+			// The config should use DirectRemoteIp instead.
+		case *v3rbacpb.Principal_RemoteIp:
+			// RBAC in gRPC treats direct_remote_ip and remote_ip as logically
+			// equivalent, as per A41.
+			m, err := newRemoteIPMatcher(principal.GetRemoteIp())
+			if err != nil {
+				return nil, err
+			}
+			matchers = append(matchers, m)
+		case *v3rbacpb.Principal_Metadata:
+			// Not supported in gRPC RBAC currently - a principal typed as
+			// Metadata in the initial config will be a no-op.
+		}
+	}
+	return matchers, nil
+}
+
+// orMatcher is a matcher where it successfully matches if one of its
+// children successfully matches. It also logically represents a principal or
+// permission, but can also be its own entity further down the tree of
+// matchers. orMatcher implements the matcher interface.
+type orMatcher struct {
+	matchers []matcher
+}
+
+func (om *orMatcher) match(data *rpcData) bool {
+	// Range through child matchers and pass in data about incoming RPC, and
+	// only one child matcher has to match to be logically successful.
+	for _, m := range om.matchers {
+		if m.match(data) {
+			return true
+		}
+	}
+	return false
+}
+
+// andMatcher is a matcher that is successful if every child matcher
+// matches. andMatcher implements the matcher interface.
+type andMatcher struct { + matchers []matcher +} + +func (am *andMatcher) match(data *rpcData) bool { + for _, m := range am.matchers { + if !m.match(data) { + return false + } + } + return true +} + +// alwaysMatcher is a matcher that will always match. This logically +// represents an any rule for a permission or a principal. alwaysMatcher +// implements the matcher interface. +type alwaysMatcher struct { +} + +func (am *alwaysMatcher) match(*rpcData) bool { + return true +} + +// notMatcher is a matcher that nots an underlying matcher. notMatcher +// implements the matcher interface. +type notMatcher struct { + matcherToNot matcher +} + +func (nm *notMatcher) match(data *rpcData) bool { + return !nm.matcherToNot.match(data) +} + +// headerMatcher is a matcher that matches on incoming HTTP Headers present +// in the incoming RPC. headerMatcher implements the matcher interface. +type headerMatcher struct { + matcher internalmatcher.HeaderMatcher +} + +func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) (*headerMatcher, error) { + var m internalmatcher.HeaderMatcher + switch headerMatcherConfig.HeaderMatchSpecifier.(type) { + case *v3route_componentspb.HeaderMatcher_ExactMatch: + m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SafeRegexMatch: + regex, err := regexp.Compile(headerMatcherConfig.GetSafeRegexMatch().Regex) + if err != nil { + return nil, err + } + m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_RangeMatch: + m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PresentMatch: + m = internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PrefixMatch: + m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SuffixMatch: + m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_ContainsMatch: + m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_StringMatch: + sm, err := internalmatcher.StringMatcherFromProto(headerMatcherConfig.GetStringMatch()) + if err != nil { + return nil, fmt.Errorf("invalid string matcher %+v: %v", headerMatcherConfig.GetStringMatch(), err) + } + m = internalmatcher.NewHeaderStringMatcher(headerMatcherConfig.Name, sm, headerMatcherConfig.InvertMatch) + default: + return nil, errors.New("unknown header matcher type") + } + return &headerMatcher{matcher: m}, nil +} + +func (hm *headerMatcher) match(data *rpcData) bool { + return hm.matcher.Match(data.md) +} + +// urlPathMatcher matches on the URL Path of the incoming RPC. In gRPC, this +// logically maps to the full method name the RPC is calling on the server side. +// urlPathMatcher implements the matcher interface. 
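+//
+// A small sketch (service and method names are illustrative):
+//
+//	prefix := "/helloworld.Greeter/"
+//	sm := internalmatcher.StringMatcherForTesting(nil, &prefix, nil, nil, nil, false)
+//	upm := &urlPathMatcher{stringMatcher: sm}
+//	upm.match(&rpcData{fullMethod: "/helloworld.Greeter/SayHello"}) // true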
+type urlPathMatcher struct { + stringMatcher internalmatcher.StringMatcher +} + +func newURLPathMatcher(pathMatcher *v3matcherpb.PathMatcher) (*urlPathMatcher, error) { + stringMatcher, err := internalmatcher.StringMatcherFromProto(pathMatcher.GetPath()) + if err != nil { + return nil, err + } + return &urlPathMatcher{stringMatcher: stringMatcher}, nil +} + +func (upm *urlPathMatcher) match(data *rpcData) bool { + return upm.stringMatcher.Match(data.fullMethod) +} + +// remoteIPMatcher and localIPMatcher both are matchers that match against +// a CIDR Range. Two different matchers are needed as the remote and destination +// ip addresses come from different parts of the data about incoming RPC's +// passed in. Matching a CIDR Range means to determine whether the IP Address +// falls within the CIDR Range or not. They both implement the matcher +// interface. +type remoteIPMatcher struct { + // ipNet represents the CidrRange that this matcher was configured with. + // This is what will remote and destination IP's will be matched against. + ipNet *net.IPNet +} + +func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) { + // Convert configuration to a cidrRangeString, as Go standard library has + // methods that parse cidr string. + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &remoteIPMatcher{ipNet: ipNet}, nil +} + +func (sim *remoteIPMatcher) match(data *rpcData) bool { + return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) +} + +type localIPMatcher struct { + ipNet *net.IPNet +} + +func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &localIPMatcher{ipNet: ipNet}, nil +} + +func (dim *localIPMatcher) match(data *rpcData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) +} + +// portMatcher matches on whether the destination port of the RPC matches the +// destination port this matcher was instantiated with. portMatcher +// implements the matcher interface. +type portMatcher struct { + destinationPort uint32 +} + +func newPortMatcher(destinationPort uint32) *portMatcher { + return &portMatcher{destinationPort: destinationPort} +} + +func (pm *portMatcher) match(data *rpcData) bool { + return data.destinationPort == pm.destinationPort +} + +// authenticatedMatcher matches on the name of the Principal. If set, the URI +// SAN or DNS SAN in that order is used from the certificate, otherwise the +// subject field is used. If unset, it applies to any user that is +// authenticated. authenticatedMatcher implements the matcher interface. +type authenticatedMatcher struct { + stringMatcher *internalmatcher.StringMatcher +} + +func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Authenticated) (*authenticatedMatcher, error) { + // Represents this line in the RBAC documentation = "If unset, it applies to + // any user that is authenticated" (see package-level comments). 
+ if authenticatedMatcherConfig.PrincipalName == nil { + return &authenticatedMatcher{}, nil + } + stringMatcher, err := internalmatcher.StringMatcherFromProto(authenticatedMatcherConfig.PrincipalName) + if err != nil { + return nil, err + } + return &authenticatedMatcher{stringMatcher: &stringMatcher}, nil +} + +func (am *authenticatedMatcher) match(data *rpcData) bool { + if data.authType != "tls" { + // Connection is not authenticated. + return false + } + if am.stringMatcher == nil { + // Allows any authenticated user. + return true + } + // "If there is no client certificate (thus no SAN nor Subject), check if "" + // (empty string) matches. If it matches, the principal_name is said to + // match" - A41 + if len(data.certs) == 0 { + return am.stringMatcher.Match("") + } + cert := data.certs[0] + // The order of matching as per the RBAC documentation (see package-level comments) + // is as follows: URI SANs, DNS SANs, and then subject name. + for _, uriSAN := range cert.URIs { + if am.stringMatcher.Match(uriSAN.String()) { + return true + } + } + for _, dnsSAN := range cert.DNSNames { + if am.stringMatcher.Match(dnsSAN) { + return true + } + } + return am.stringMatcher.Match(cert.Subject.String()) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go new file mode 100644 index 000000000..344052cb0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -0,0 +1,316 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package rbac provides service-level and method-level access control for a +// service. See +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#role-based-access-control-rbac +// for documentation. +package rbac + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" +) + +var logger = grpclog.Component("rbac") + +var getConnection = transport.GetConnection + +// ChainEngine represents a chain of RBAC Engines, used to make authorization +// decisions on incoming RPCs. +type ChainEngine struct { + chainedEngines []*engine +} + +// NewChainEngine returns a chain of RBAC engines, used to make authorization +// decisions on incoming RPCs. Returns a non-nil error for invalid policies. 
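+//
+// A minimal sketch of constructing a chain (the policy name and contents are
+// illustrative):
+//
+//	allow := &v3rbacpb.RBAC{
+//		Action: v3rbacpb.RBAC_ALLOW,
+//		Policies: map[string]*v3rbacpb.Policy{
+//			"allow-all": {
+//				Permissions: []*v3rbacpb.Permission{{Rule: &v3rbacpb.Permission_Any{Any: true}}},
+//				Principals:  []*v3rbacpb.Principal{{Identifier: &v3rbacpb.Principal_Any{Any: true}}},
+//			},
+//		},
+//	}
+//	ce, err := NewChainEngine([]*v3rbacpb.RBAC{allow}, "example-authz-policy")
+//	// On success, ce.IsAuthorized(ctx) returns nil for RPCs that match the
+//	// allow-all policy.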
+func NewChainEngine(policies []*v3rbacpb.RBAC, policyName string) (*ChainEngine, error) { + engines := make([]*engine, 0, len(policies)) + for _, policy := range policies { + engine, err := newEngine(policy, policyName) + if err != nil { + return nil, err + } + engines = append(engines, engine) + } + return &ChainEngine{chainedEngines: engines}, nil +} + +func (cre *ChainEngine) logRequestDetails(rpcData *rpcData) { + if logger.V(2) { + logger.Infof("checking request: url path=%s", rpcData.fullMethod) + if len(rpcData.certs) > 0 { + cert := rpcData.certs[0] + logger.Infof("uri sans=%q, dns sans=%q, subject=%v", cert.URIs, cert.DNSNames, cert.Subject) + } + } +} + +// IsAuthorized determines if an incoming RPC is authorized based on the chain of RBAC +// engines and their associated actions. +// +// Errors returned by this function are compatible with the status package. +func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { + // This conversion step (i.e. pulling things out of ctx) can be done once, + // and then be used for the whole chain of RBAC Engines. + rpcData, err := newRPCData(ctx) + if err != nil { + logger.Errorf("newRPCData: %v", err) + return status.Errorf(codes.Internal, "gRPC RBAC: %v", err) + } + for _, engine := range cre.chainedEngines { + matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) + if logger.V(2) && ok { + logger.Infof("incoming RPC matched to policy %v in engine with action %v", matchingPolicyName, engine.action) + } + + switch { + case engine.action == v3rbacpb.RBAC_ALLOW && !ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") + case engine.action == v3rbacpb.RBAC_DENY && ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) + } + // Every policy in the engine list must be queried. Thus, iterate to the + // next policy. + engine.doAuditLogging(rpcData, matchingPolicyName, true) + } + // If the incoming RPC gets through all of the engines successfully (i.e. + // doesn't not match an allow or match a deny engine), the RPC is authorized + // to proceed. + return nil +} + +// engine is used for matching incoming RPCs to policies. +type engine struct { + // TODO(gtcooke94) - differentiate between `policyName`, `policies`, and `rules` + policyName string + policies map[string]*policyMatcher + // action must be ALLOW or DENY. + action v3rbacpb.RBAC_Action + auditLoggers []audit.Logger + auditCondition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition +} + +// newEngine creates an RBAC Engine based on the contents of a policy. Returns a +// non-nil error if the policy is invalid. 
+func newEngine(config *v3rbacpb.RBAC, policyName string) (*engine, error) { + a := config.GetAction() + if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { + return nil, fmt.Errorf("unsupported action %s", config.Action) + } + + policies := make(map[string]*policyMatcher, len(config.GetPolicies())) + for name, policy := range config.GetPolicies() { + matcher, err := newPolicyMatcher(policy) + if err != nil { + return nil, err + } + policies[name] = matcher + } + + auditLoggers, auditCondition, err := parseAuditOptions(config.GetAuditLoggingOptions()) + if err != nil { + return nil, err + } + return &engine{ + policyName: policyName, + policies: policies, + action: a, + auditLoggers: auditLoggers, + auditCondition: auditCondition, + }, nil +} + +func parseAuditOptions(opts *v3rbacpb.RBAC_AuditLoggingOptions) ([]audit.Logger, v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition, error) { + if opts == nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, nil + } + var auditLoggers []audit.Logger + for _, logger := range opts.LoggerConfigs { + auditLogger, err := buildLogger(logger) + if err != nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, err + } + if auditLogger == nil { + // This occurs when the audit logger is not registered but also + // marked optional. + continue + } + auditLoggers = append(auditLoggers, auditLogger) + } + return auditLoggers, opts.GetAuditCondition(), nil + +} + +// findMatchingPolicy determines if an incoming RPC matches a policy. On a +// successful match, it returns the name of the matching policy and a true bool +// to specify that there was a matching policy found. It returns false in +// the case of not finding a matching policy. +func (e *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { + for policy, matcher := range e.policies { + if matcher.match(rpcData) { + return policy, true + } + } + return "", false +} + +// newRPCData takes an incoming context (should be a context representing state +// needed for server RPC Call with metadata, peer info (used for source ip/port +// and TLS information) and connection (used for destination ip/port) piped into +// it) and the method name of the Service being called server side and populates +// an rpcData struct ready to be passed to the RBAC Engine to find a matching +// policy. +func newRPCData(ctx context.Context) (*rpcData, error) { + // The caller should populate all of these fields (i.e. for empty headers, + // pipe an empty md into context). + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errors.New("missing metadata in incoming context") + } + // ":method can be hard-coded to POST if unavailable" - A41 + md[":method"] = []string{"POST"} + // "If the transport exposes TE in Metadata, then RBAC must special-case the + // header to treat it as not present." - A41 + delete(md, "TE") + + pi, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("missing peer info in incoming context") + } + + // The methodName will be available in the passed in ctx from a unary or streaming + // interceptor, as grpc.Server pipes in a transport stream which contains the methodName + // into contexts available in both unary or streaming interceptors. + mn, ok := grpc.Method(ctx) + if !ok { + return nil, errors.New("missing method in incoming context") + } + + // The connection is needed in order to find the destination address and + // port of the incoming RPC Call. 
+ conn := getConnection(ctx) + if conn == nil { + return nil, errors.New("missing connection in incoming context") + } + _, dPort, err := net.SplitHostPort(conn.LocalAddr().String()) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + dp, err := strconv.ParseUint(dPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + + var authType string + var peerCertificates []*x509.Certificate + if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok { + authType = pi.AuthInfo.AuthType() + peerCertificates = tlsInfo.State.PeerCertificates + } + + return &rpcData{ + md: md, + peerInfo: pi, + fullMethod: mn, + destinationPort: uint32(dp), + localAddr: conn.LocalAddr(), + authType: authType, + certs: peerCertificates, + }, nil +} + +// rpcData wraps data pulled from an incoming RPC that the RBAC engine needs to +// find a matching policy. +type rpcData struct { + // md is the HTTP Headers that are present in the incoming RPC. + md metadata.MD + // peerInfo is information about the downstream peer. + peerInfo *peer.Peer + // fullMethod is the method name being called on the upstream service. + fullMethod string + // destinationPort is the port that the RPC is being sent to on the + // server. + destinationPort uint32 + // localAddr is the address that the RPC is being sent to. + localAddr net.Addr + // authType is the type of authentication e.g. "tls". + authType string + // certs are the certificates presented by the peer during a TLS + // handshake. + certs []*x509.Certificate +} + +func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) { + // In the RBAC world, we need to have a SPIFFE ID as the principal for this + // to be meaningful + principal := "" + if rpcData.peerInfo != nil { + // If AuthType = tls, then we can cast AuthInfo to TLSInfo. + if tlsInfo, ok := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo); ok { + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } + } + } + + //TODO(gtcooke94) check if we need to log before creating the event + event := &audit.Event{ + FullMethodName: rpcData.fullMethod, + Principal: principal, + PolicyName: e.policyName, + MatchedRule: rule, + Authorized: authorized, + } + for _, logger := range e.auditLoggers { + switch e.auditCondition { + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + if !authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + if authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + logger.Log(event) + } + } +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" diff --git a/vendor/google.golang.org/grpc/orca/call_metrics.go b/vendor/google.golang.org/grpc/orca/call_metrics.go new file mode 100644 index 000000000..9ae772142 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + + "google.golang.org/grpc" + grpcinternal "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/proto" +) + +// CallMetricsRecorder allows a service method handler to record per-RPC +// metrics. It contains all utilization-based metrics from +// ServerMetricsRecorder as well as additional request cost metrics. +type CallMetricsRecorder interface { + ServerMetricsRecorder + + // SetRequestCost sets the relevant server metric. + SetRequestCost(name string, val float64) + // DeleteRequestCost deletes the relevant server metric to prevent it + // from being sent. + DeleteRequestCost(name string) + + // SetNamedMetric sets the relevant server metric. + SetNamedMetric(name string, val float64) + // DeleteNamedMetric deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedMetric(name string) +} + +type callMetricsRecorderCtxKey struct{} + +// CallMetricsRecorderFromContext returns the RPC-specific custom metrics +// recorder embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. +func CallMetricsRecorderFromContext(ctx context.Context) CallMetricsRecorder { + rw, ok := ctx.Value(callMetricsRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +// recorderWrapper is a wrapper around a CallMetricsRecorder to ensure that +// concurrent calls to CallMetricsRecorderFromContext() results in only one +// allocation of the underlying metrics recorder, while also allowing for lazy +// initialization of the recorder itself. +type recorderWrapper struct { + once sync.Once + r CallMetricsRecorder + smp ServerMetricsProvider +} + +func (rw *recorderWrapper) recorder() CallMetricsRecorder { + rw.once.Do(func() { + rw.r = newServerMetricsRecorder() + }) + return rw.r +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `internal.TrailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. Any errors encountered here are not propagated to the caller because +// they are ignored there. Hence we simply log any errors encountered here at +// warning level, and return nothing. 
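+//
+// When a ServerMetricsProvider was supplied to CallMetricsServerOption, its
+// metrics form the base report and any per-RPC values recorded via the
+// CallMetricsRecorder are merged on top before the trailer is set.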
+func (rw *recorderWrapper) setTrailerMetadata(ctx context.Context) { + var sm *ServerMetrics + if rw.smp != nil { + sm = rw.smp.ServerMetrics() + sm.merge(rw.r.ServerMetrics()) + } else { + sm = rw.r.ServerMetrics() + } + + b, err := proto.Marshal(sm.toLoadReportProto()) + if err != nil { + logger.Warningf("Failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(internal.TrailerMetadataKey, string(b))); err != nil { + logger.Warningf("Failed to set trailer metadata: %v", err) + } +} + +var joinServerOptions = grpcinternal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricsRecorder] to be used, via a call +// to CallMetricsRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// If a non-nil ServerMetricsProvider is provided, the gRPC server will +// transmit the metrics it provides, overwritten by any per-RPC metrics given +// to the CallMetricsRecorder. A ServerMetricsProvider is typically obtained +// by calling NewServerMetricsRecorder. +// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) +} + +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). + rw := &recorderWrapper{smp: smp} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ctx) + } + return resp, err + } +} + +func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). 
+ rw := &recorderWrapper{smp: smp} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ss.Context()) + } + return err + } +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricsRecorderCtxKey{}, r) +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricsRecorder corresponding to this +// stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/vendor/google.golang.org/grpc/orca/internal/internal.go b/vendor/google.golang.org/grpc/orca/internal/internal.go new file mode 100644 index 000000000..d1425c3e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/internal/internal.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains orca-internal code, for testing purposes and to +// avoid polluting the godoc of the top-level orca package. +package internal + +import ( + "errors" + "fmt" + + ibackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval +// configured via ServiceOptions, to a minimum of 30s. +// +// For testing purposes only. +var AllowAnyMinReportingInterval any // func(*ServiceOptions) + +// DefaultBackoffFunc is used by the producer to control its backoff behavior. +// +// For testing purposes only. +var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff + +// TrailerMetadataKey is the key in which the per-call backend metrics are +// transmitted. +const TrailerMetadataKey = "endpoint-load-metrics-bin" + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, if multiple load +// reports are found, or if the load report found cannot be parsed, an error is +// returned. 
+// +// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) +func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { + vs := md.Get(TrailerMetadataKey) + if len(vs) == 0 { + return nil, nil + } + if len(vs) != 1 { + return nil, errors.New("multiple orca load reports found in provided metadata") + } + ret := new(v3orcapb.OrcaLoadReport) + if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { + return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) + } + return ret, nil +} diff --git a/vendor/google.golang.org/grpc/orca/orca.go b/vendor/google.golang.org/grpc/orca/orca.go new file mode 100644 index 000000000..d0cb3720c --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/orca.go @@ -0,0 +1,57 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package orca implements Open Request Cost Aggregation, which is an open +// standard for request cost aggregation and reporting by backends and the +// corresponding aggregation of such reports by L7 load balancers (such as +// Envoy) on the data plane. In a proxyless world with gRPC enabled +// applications, aggregation of such reports will be done by the gRPC client. +// +// # Experimental +// +// Notice: All APIs is this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package orca + +import ( + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" +) + +var logger = grpclog.Component("orca-backend-metrics") + +// loadParser implements the Parser interface defined in `internal/balancerload` +// package. This interface is used by the client stream to parse load reports +// sent by the server in trailer metadata. The parsed loads are then sent to +// balancers via balancer.DoneInfo. +// +// The grpc package cannot directly call toLoadReport() as that would cause an +// import cycle. Hence this roundabout method is used. +type loadParser struct{} + +func (loadParser) Parse(md metadata.MD) any { + lr, err := internal.ToLoadReport(md) + if err != nil { + logger.Infof("Parse failed: %v", err) + } + return lr +} + +func init() { + balancerload.SetParser(loadParser{}) +} diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go new file mode 100644 index 000000000..6e7c4c9f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -0,0 +1,223 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orca + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + "google.golang.org/protobuf/types/known/durationpb" +) + +type producerBuilder struct{} + +// Build constructs and returns a producer and its cleanup function +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &producer{ + client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), + intervals: make(map[time.Duration]int), + listeners: make(map[OOBListener]struct{}), + backoff: internal.DefaultBackoffFunc, + } + return p, func() { + <-p.stopped + } +} + +var producerBuilderSingleton = &producerBuilder{} + +// OOBListener is used to receive out-of-band load reports as they arrive. +type OOBListener interface { + // OnLoadReport is called when a load report is received. + OnLoadReport(*v3orcapb.OrcaLoadReport) +} + +// OOBListenerOptions contains options to control how an OOBListener is called. +type OOBListenerOptions struct { + // ReportInterval specifies how often to request the server to provide a + // load report. May be provided less frequently if the server requires a + // longer interval, or may be provided more frequently if another + // subscriber requests a shorter interval. + ReportInterval time.Duration +} + +// RegisterOOBListener registers an out-of-band load report listener on sc. +// Any OOBListener may only be registered once per subchannel at a time. The +// returned stop function must be called when no longer needed. Do not +// register a single OOBListener more than once per SubConn. +func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*producer) + + p.registerListener(l, opts.ReportInterval) + + // TODO: When we can register for SubConn state updates, automatically call + // stop() on SHUTDOWN. + + // If stop is called multiple times, prevent it from having any effect on + // subsequent calls. + return grpcsync.OnceFunc(func() { + p.unregisterListener(l, opts.ReportInterval) + closeFn() + }) +} + +type producer struct { + client v3orcaservicegrpc.OpenRcaServiceClient + + // backoff is called between stream attempts to determine how long to delay + // to avoid overloading a server experiencing problems. The attempt count + // is incremented when stream errors occur and is reset when the stream + // reports a result. 
+ backoff func(int) time.Duration + + mu sync.Mutex + intervals map[time.Duration]int // map from interval time to count of listeners requesting that time + listeners map[OOBListener]struct{} // set of registered listeners + minInterval time.Duration + stop func() // stops the current run goroutine + stopped chan struct{} // closed when the run goroutine exits +} + +// registerListener adds the listener and its requested report interval to the +// producer. +func (p *producer) registerListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + p.listeners[l] = struct{}{} + p.intervals[interval]++ + if len(p.listeners) == 1 || interval < p.minInterval { + p.minInterval = interval + p.updateRunLocked() + } +} + +// registerListener removes the listener and its requested report interval to +// the producer. +func (p *producer) unregisterListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.listeners, l) + p.intervals[interval]-- + if p.intervals[interval] == 0 { + delete(p.intervals, interval) + + if p.minInterval == interval { + p.recomputeMinInterval() + p.updateRunLocked() + } + } +} + +// recomputeMinInterval sets p.minInterval to the minimum key's value in +// p.intervals. +func (p *producer) recomputeMinInterval() { + first := true + for interval := range p.intervals { + if first || interval < p.minInterval { + p.minInterval = interval + first = false + } + } +} + +// updateRunLocked is called whenever the run goroutine needs to be started / +// stopped / restarted due to: 1. the initial listener being registered, 2. the +// final listener being unregistered, or 3. the minimum registered interval +// changing. +func (p *producer) updateRunLocked() { + if p.stop != nil { + p.stop() + p.stop = nil + } + if len(p.listeners) > 0 { + var ctx context.Context + ctx, p.stop = context.WithCancel(context.Background()) + p.stopped = make(chan struct{}) + go p.run(ctx, p.stopped, p.minInterval) + } +} + +// run manages the ORCA OOB stream on the subchannel. +func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { + defer close(done) + + runStream := func() error { + resetBackoff, err := p.runStream(ctx, interval) + if status.Code(err) == codes.Unimplemented { + // Unimplemented; do not retry. + logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") + return err + } + // Retry for all other errors. + if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled { + // TODO: Unavailable and Canceled should also ideally log an error, + // but for now we receive them when shutting down the ClientConn + // (Unavailable if the stream hasn't started yet, and Canceled if it + // happens mid-stream). Once we can determine the state or ensure + // the producer is stopped before the stream ends, we can log an + // error when it's not a natural shutdown. + logger.Error("Received unexpected stream error:", err) + } + if resetBackoff { + return backoff.ErrResetBackoff + } + return nil + } + backoff.RunF(ctx, runStream, p.backoff) +} + +// runStream runs a single stream on the subchannel and returns the resulting +// error, if any, and whether or not the run loop should reset the backoff +// timer to zero or advance it. 
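+//
+// resetBackoff is reported as true once at least one load report has been
+// received on the stream, so the run loop restarts with a fresh backoff after
+// a stream that made progress.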
+func (p *producer) runStream(ctx context.Context, interval time.Duration) (resetBackoff bool, err error) { + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ + ReportInterval: durationpb.New(interval), + }) + if err != nil { + return false, err + } + + for { + report, err := stream.Recv() + if err != nil { + return resetBackoff, err + } + resetBackoff = true + p.mu.Lock() + for l := range p.listeners { + l.OnLoadReport(report) + } + p.mu.Unlock() + } +} diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go new file mode 100644 index 000000000..bb664d6a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -0,0 +1,352 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "sync/atomic" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// ServerMetrics is the data returned from a server to a client to describe the +// current state of the server and/or the cost of a request when used per-call. +type ServerMetrics struct { + CPUUtilization float64 // CPU utilization: [0, inf); unset=-1 + MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + AppUtilization float64 // Application utilization: [0, inf); unset=-1 + QPS float64 // queries per second: [0, inf); unset=-1 + EPS float64 // errors per second: [0, inf); unset=-1 + + // The following maps must never be nil. + + Utilization map[string]float64 // Custom fields: [0, 1.0] + RequestCost map[string]float64 // Custom fields: [0, inf); not sent OOB + NamedMetrics map[string]float64 // Custom fields: [0, inf); not sent OOB +} + +// toLoadReportProto dumps sm as an OrcaLoadReport proto. +func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { + ret := &v3orcapb.OrcaLoadReport{ + Utilization: sm.Utilization, + RequestCost: sm.RequestCost, + NamedMetrics: sm.NamedMetrics, + } + if sm.CPUUtilization != -1 { + ret.CpuUtilization = sm.CPUUtilization + } + if sm.MemUtilization != -1 { + ret.MemUtilization = sm.MemUtilization + } + if sm.AppUtilization != -1 { + ret.ApplicationUtilization = sm.AppUtilization + } + if sm.QPS != -1 { + ret.RpsFractional = sm.QPS + } + if sm.EPS != -1 { + ret.Eps = sm.EPS + } + return ret +} + +// merge merges o into sm, overwriting any values present in both. 
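+// Scalar fields in o that are set to -1 are treated as unset and leave the
+// corresponding field in sm unchanged; map entries in o overwrite entries with
+// the same key in sm.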
+func (sm *ServerMetrics) merge(o *ServerMetrics) { + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) + if o.CPUUtilization != -1 { + sm.CPUUtilization = o.CPUUtilization + } + if o.MemUtilization != -1 { + sm.MemUtilization = o.MemUtilization + } + if o.AppUtilization != -1 { + sm.AppUtilization = o.AppUtilization + } + if o.QPS != -1 { + sm.QPS = o.QPS + } + if o.EPS != -1 { + sm.EPS = o.EPS + } +} + +func mergeMap(a, b map[string]float64) { + for k, v := range b { + a[k] = v + } +} + +// ServerMetricsRecorder allows for recording and providing out of band server +// metrics. +type ServerMetricsRecorder interface { + ServerMetricsProvider + + // SetCPUUtilization sets the CPU utilization server metric. Must be + // greater than zero. + SetCPUUtilization(float64) + // DeleteCPUUtilization deletes the CPU utilization server metric to + // prevent it from being sent. + DeleteCPUUtilization() + + // SetMemoryUtilization sets the memory utilization server metric. Must be + // in the range [0, 1]. + SetMemoryUtilization(float64) + // DeleteMemoryUtilization deletes the memory utilization server metric to + // prevent it from being sent. + DeleteMemoryUtilization() + + // SetApplicationUtilization sets the application utilization server + // metric. Must be greater than zero. + SetApplicationUtilization(float64) + // DeleteApplicationUtilization deletes the application utilization server + // metric to prevent it from being sent. + DeleteApplicationUtilization() + + // SetQPS sets the Queries Per Second server metric. Must be greater than + // zero. + SetQPS(float64) + // DeleteQPS deletes the Queries Per Second server metric to prevent it + // from being sent. + DeleteQPS() + + // SetEPS sets the Errors Per Second server metric. Must be greater than + // zero. + SetEPS(float64) + // DeleteEPS deletes the Errors Per Second server metric to prevent it from + // being sent. + DeleteEPS() + + // SetNamedUtilization sets the named utilization server metric for the + // name provided. val must be in the range [0, 1]. + SetNamedUtilization(name string, val float64) + // DeleteNamedUtilization deletes the named utilization server metric for + // the name provided to prevent it from being sent. + DeleteNamedUtilization(name string) +} + +type serverMetricsRecorder struct { + state atomic.Pointer[ServerMetrics] // the current metrics +} + +// NewServerMetricsRecorder returns an in-memory store for ServerMetrics and +// allows for safe setting and retrieving of ServerMetrics. Also implements +// ServerMetricsProvider for use with NewService. +func NewServerMetricsRecorder() ServerMetricsRecorder { + return newServerMetricsRecorder() +} + +func newServerMetricsRecorder() *serverMetricsRecorder { + s := new(serverMetricsRecorder) + s.state.Store(&ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }) + return s +} + +// ServerMetrics returns a copy of the current ServerMetrics. 
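+//
+// The copy is deep (the nested maps are copied as well), so callers may read
+// or modify it without affecting the recorder. A minimal usage sketch,
+// illustrative only:
+//
+//	smr := NewServerMetricsRecorder()
+//	smr.SetCPUUtilization(0.8)
+//	smr.SetQPS(120)
+//	snapshot := smr.ServerMetrics()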
+func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { + return copyServerMetrics(s.state.Load()) +} + +func copyMap(m map[string]float64) map[string]float64 { + ret := make(map[string]float64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyServerMetrics(sm *ServerMetrics) *ServerMetrics { + return &ServerMetrics{ + CPUUtilization: sm.CPUUtilization, + MemUtilization: sm.MemUtilization, + AppUtilization: sm.AppUtilization, + QPS: sm.QPS, + EPS: sm.EPS, + Utilization: copyMap(sm.Utilization), + RequestCost: copyMap(sm.RequestCost), + NamedMetrics: copyMap(sm.NamedMetrics), + } +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. +func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring CPU Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = val + s.state.Store(smCopy) +} + +// DeleteCPUUtilization deletes the relevant server metric to prevent it from +// being sent. +func (s *serverMetricsRecorder) DeleteCPUUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = -1 + s.state.Store(smCopy) +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Memory Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = val + s.state.Store(smCopy) +} + +// DeleteMemoryUtilization deletes the relevant server metric to prevent it +// from being sent. +func (s *serverMetricsRecorder) DeleteMemoryUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = -1 + s.state.Store(smCopy) +} + +// SetApplicationUtilization records a measurement for a generic utilization +// metric. +func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring Application Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = val + s.state.Store(smCopy) +} + +// DeleteApplicationUtilization deletes the relevant server metric to prevent +// it from being sent. +func (s *serverMetricsRecorder) DeleteApplicationUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = -1 + s.state.Store(smCopy) +} + +// SetQPS records a measurement for the QPS metric. +func (s *serverMetricsRecorder) SetQPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring QPS value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = val + s.state.Store(smCopy) +} + +// DeleteQPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteQPS() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = -1 + s.state.Store(smCopy) +} + +// SetEPS records a measurement for the EPS metric. +func (s *serverMetricsRecorder) SetEPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring EPS value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = val + s.state.Store(smCopy) +} + +// DeleteEPS deletes the relevant server metric to prevent it from being sent. 
+func (s *serverMetricsRecorder) DeleteEPS() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = -1 + s.state.Store(smCopy) +} + +// SetNamedUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Named Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.Utilization[name] = val + s.state.Store(smCopy) +} + +// DeleteNamedUtilization deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.Utilization, name) + s.state.Store(smCopy) +} + +// SetRequestCost records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.RequestCost[name] = val + s.state.Store(smCopy) +} + +// DeleteRequestCost deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteRequestCost(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.RequestCost, name) + s.state.Store(smCopy) +} + +// SetNamedMetric records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.NamedMetrics[name] = val + s.state.Store(smCopy) +} + +// DeleteNamedMetric deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.NamedMetrics, name) + s.state.Store(smCopy) +} diff --git a/vendor/google.golang.org/grpc/orca/service.go b/vendor/google.golang.org/grpc/orca/service.go new file mode 100644 index 000000000..7461a6b05 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/service.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + ointernal "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +func init() { + ointernal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { + so.allowAnyMinReportingInterval = true + } + internal.ORCAAllowAnyMinReportingInterval = ointernal.AllowAnyMinReportingInterval +} + +// minReportingInterval is the absolute minimum value supported for +// out-of-band metrics reporting from the ORCA service implementation +// provided by the orca package. +const minReportingInterval = 30 * time.Second + +// Service provides an implementation of the OpenRcaService as defined in the +// [ORCA] service protos. Instances of this type must be created via calls to +// Register() or NewService(). +// +// Server applications can use the SetXxx() and DeleteXxx() methods to record +// measurements corresponding to backend metrics, which eventually get pushed to +// clients who have initiated the SteamCoreMetrics streaming RPC. +// +// [ORCA]: https://github.com/cncf/xds/blob/main/xds/service/orca/v3/orca.proto +type Service struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + // Minimum reporting interval, as configured by the user, or the default. + minReportingInterval time.Duration + + smProvider ServerMetricsProvider +} + +// ServiceOptions contains options to configure the ORCA service implementation. +type ServiceOptions struct { + // ServerMetricsProvider is the provider to be used by the service for + // reporting OOB server metrics to clients. Typically obtained via + // NewServerMetricsRecorder. This field is required. + ServerMetricsProvider ServerMetricsProvider + + // MinReportingInterval sets the lower bound for how often out-of-band + // metrics are reported on the streaming RPC initiated by the client. If + // unspecified, negative or less than the default value of 30s, the default + // is used. Clients may request a higher value as part of the + // StreamCoreMetrics streaming RPC. + MinReportingInterval time.Duration + + // Allow a minReportingInterval which is less than the default of 30s. + // Used for testing purposes only. + allowAnyMinReportingInterval bool +} + +// A ServerMetricsProvider provides ServerMetrics upon request. +type ServerMetricsProvider interface { + // ServerMetrics returns the current set of server metrics. It should + // return a read-only, immutable copy of the data that is active at the + // time of the call. + ServerMetrics() *ServerMetrics +} + +// NewService creates a new ORCA service implementation configured using the +// provided options. +func NewService(opts ServiceOptions) (*Service, error) { + // The default minimum supported reporting interval value can be overridden + // for testing purposes through the orca internal package. 
+ if opts.ServerMetricsProvider == nil { + return nil, fmt.Errorf("ServerMetricsProvider not specified") + } + if !opts.allowAnyMinReportingInterval { + if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { + opts.MinReportingInterval = minReportingInterval + } + } + service := &Service{ + minReportingInterval: opts.MinReportingInterval, + smProvider: opts.ServerMetricsProvider, + } + return service, nil +} + +// Register creates a new ORCA service implementation configured using the +// provided options and registers the same on the provided grpc Server. +func Register(s *grpc.Server, opts ServiceOptions) error { + // TODO(https://github.com/cncf/xds/issues/41): replace *grpc.Server with + // grpc.ServiceRegistrar when possible. + service, err := NewService(opts) + if err != nil { + return err + } + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) + return nil +} + +// determineReportingInterval determines the reporting interval for out-of-band +// metrics. If the reporting interval is not specified in the request, or is +// negative or is less than the configured minimum (via +// ServiceOptions.MinReportingInterval), the latter is used. Else the value from +// the incoming request is used. +func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReportRequest) time.Duration { + if req.GetReportInterval() == nil { + return s.minReportingInterval + } + dur := req.GetReportInterval().AsDuration() + if dur < s.minReportingInterval { + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using minimum", dur, s.minReportingInterval) + return s.minReportingInterval + } + return dur +} + +func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + return stream.Send(s.smProvider.ServerMetrics().toLoadReportProto()) +} + +// StreamCoreMetrics streams custom backend metrics injected by the server +// application. +func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + ticker := time.NewTicker(s.determineReportingInterval(req)) + defer ticker.Stop() + + for { + if err := s.sendMetricsResponse(stream); err != nil { + return err + } + // Send a response containing the currently recorded metrics + select { + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + case <-ticker.C: + } + } +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go new file mode 100644 index 000000000..4af7f933c --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go @@ -0,0 +1,277 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opentelemetry + +import ( + "context" + "sync/atomic" + "time" + + "google.golang.org/grpc" + estats "google.golang.org/grpc/experimental/stats" + istats "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" +) + +type clientStatsHandler struct { + estats.MetricsRecorder + options Options + clientMetrics clientMetrics +} + +func (h *clientStatsHandler) initializeMetrics() { + // Will set no metrics to record, logically making this stats handler a + // no-op. + if h.options.MetricsOptions.MeterProvider == nil { + return + } + + meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version)) + if meter == nil { + return + } + + metrics := h.options.MetricsOptions.Metrics + if metrics == nil { + metrics = DefaultMetrics() + } + + h.clientMetrics.attemptStarted = createInt64Counter(metrics.Metrics(), "grpc.client.attempt.started", meter, otelmetric.WithUnit("attempt"), otelmetric.WithDescription("Number of client call attempts started.")) + h.clientMetrics.attemptDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.attempt.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + h.clientMetrics.attemptSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.clientMetrics.attemptRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.clientMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("Time taken by gRPC to complete an RPC from application's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + + rm := ®istryMetrics{ + optionalLabels: h.options.MetricsOptions.OptionalLabels, + } + h.MetricsRecorder = rm + rm.registerMetrics(metrics, meter) +} + +func 
(h *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ci := &callInfo{ + target: cc.CanonicalTarget(), + method: h.determineMethod(method, opts...), + } + ctx = setCallInfo(ctx, ci) + + if h.options.MetricsOptions.pluginOption != nil { + md := h.options.MetricsOptions.pluginOption.GetMetadata() + for k, vs := range md { + for _, v := range vs { + ctx = metadata.AppendToOutgoingContext(ctx, k, v) + } + } + } + + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + h.perCallMetrics(ctx, err, startTime, ci) + return err +} + +// determineMethod determines the method to record attributes with. This will be +// "other" if StaticMethod isn't specified or if method filter is set and +// specifies, the method name as is otherwise. +func (h *clientStatsHandler) determineMethod(method string, opts ...grpc.CallOption) string { + for _, opt := range opts { + if _, ok := opt.(grpc.StaticMethodCallOption); ok { + return removeLeadingSlash(method) + } + } + return "other" +} + +func (h *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ci := &callInfo{ + target: cc.CanonicalTarget(), + method: h.determineMethod(method, opts...), + } + ctx = setCallInfo(ctx, ci) + + if h.options.MetricsOptions.pluginOption != nil { + md := h.options.MetricsOptions.pluginOption.GetMetadata() + for k, vs := range md { + for _, v := range vs { + ctx = metadata.AppendToOutgoingContext(ctx, k, v) + } + } + } + + startTime := time.Now() + + callback := func(err error) { + h.perCallMetrics(ctx, err, startTime, ci) + } + opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...) + return streamer(ctx, desc, cc, method, opts...) +} + +func (h *clientStatsHandler) perCallMetrics(ctx context.Context, err error, startTime time.Time, ci *callInfo) { + callLatency := float64(time.Since(startTime)) / float64(time.Second) // calculate ASAP + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + otelattribute.String("grpc.status", canonicalString(status.Code(err))), + )) + h.clientMetrics.callDuration.Record(ctx, callLatency, attrs) +} + +// TagConn exists to satisfy stats.Handler. +func (h *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (h *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC attempt context management. +func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + // Numerous stats handlers can be used for the same channel. The cluster + // impl balancer which writes to this will only write once, thus have this + // stats handler's per attempt scoped context point to the same optional + // labels map if set. + var labels *istats.Labels + if labels = istats.GetLabels(ctx); labels == nil { + labels = &istats.Labels{ + // The defaults for all the per call labels from a plugin that + // executes on the callpath that this OpenTelemetry component + // currently supports. 
+ TelemetryLabels: map[string]string{ + "grpc.lb.locality": "", + }, + } + ctx = istats.SetLabels(ctx, labels) + } + ai := &attemptInfo{ // populates information about RPC start. + startTime: time.Now(), + xdsLabels: labels.TelemetryLabels, + method: info.FullMethodName, + } + ri := &rpcInfo{ + ai: ai, + } + return setRPCInfo(ctx, ri) +} + +func (h *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no client attempt data present") + return + } + h.processRPCEvent(ctx, rs, ri.ai) +} + +func (h *clientStatsHandler) processRPCEvent(ctx context.Context, s stats.RPCStats, ai *attemptInfo) { + switch st := s.(type) { + case *stats.Begin: + ci := getCallInfo(ctx) + if ci == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present") + return + } + + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + )) + h.clientMetrics.attemptStarted.Add(ctx, 1, attrs) + case *stats.OutPayload: + atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength)) + case *stats.InPayload: + atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength)) + case *stats.InHeader: + h.setLabelsFromPluginOption(ai, st.Header) + case *stats.InTrailer: + h.setLabelsFromPluginOption(ai, st.Trailer) + case *stats.End: + h.processRPCEnd(ctx, ai, st) + default: + } +} + +func (h *clientStatsHandler) setLabelsFromPluginOption(ai *attemptInfo, incomingMetadata metadata.MD) { + if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil { + labels := h.options.MetricsOptions.pluginOption.GetLabels(incomingMetadata) + if labels == nil { + labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt. + } + ai.pluginOptionLabels = labels + } +} + +func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) { + ci := getCallInfo(ctx) + if ci == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present") + return + } + latency := float64(time.Since(ai.startTime)) / float64(time.Second) + st := "OK" + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } + + attributes := []otelattribute.KeyValue{ + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + otelattribute.String("grpc.status", st), + } + + for k, v := range ai.pluginOptionLabels { + attributes = append(attributes, otelattribute.String(k, v)) + } + + for _, o := range h.options.MetricsOptions.OptionalLabels { + // TODO: Add a filter for converting to unknown if not present in the + // CSM Plugin Option layer by adding an optional labels API. + if val, ok := ai.xdsLabels[o]; ok { + attributes = append(attributes, otelattribute.String(o, val)) + } + } + + // Allocate vararg slice once. + opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))} + h.clientMetrics.attemptDuration.Record(ctx, latency, opts...) + h.clientMetrics.attemptSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...) + h.clientMetrics.attemptRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...) 
+} + +const ( + // ClientAttemptStarted is the number of client call attempts started. + ClientAttemptStarted estats.Metric = "grpc.client.attempt.started" + // ClientAttemptDuration is the end-to-end time taken to complete a client + // call attempt. + ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration" + // ClientAttemptSentCompressedTotalMessageSize is the compressed message + // bytes sent per client call attempt. + ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size" + // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message + // bytes received per call attempt. + ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size" + // ClientCallDuration is the time taken by gRPC to complete an RPC from + // application's perspective. + ClientCallDuration estats.Metric = "grpc.client.call.duration" +) diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go new file mode 100644 index 000000000..b595aa85f --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal defines the PluginOption interface. +package internal + +import ( + "google.golang.org/grpc/metadata" +) + +// SetPluginOption sets the plugin option on Options. +var SetPluginOption any // func(*Options, PluginOption) + +// PluginOption is the interface which represents a plugin option for the +// OpenTelemetry instrumentation component. This plugin option emits labels from +// metadata and also creates metadata containing labels. These labels are +// intended to be added to applicable OpenTelemetry metrics recorded in the +// OpenTelemetry instrumentation component. +// +// In the future, we hope to stabilize and expose this API to allow plugins to +// inject labels of their choosing into metrics recorded. +type PluginOption interface { + // GetMetadata creates a MD with metadata exchange labels. + GetMetadata() metadata.MD + // GetLabels emits labels to be attached to metrics for the RPC that + // contains the provided incomingMetadata. + GetLabels(incomingMetadata metadata.MD) map[string]string +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go new file mode 100644 index 000000000..cc5ad387f --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go @@ -0,0 +1,390 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package opentelemetry implements opentelemetry instrumentation code for +// gRPC-Go clients and servers. +package opentelemetry + +import ( + "context" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + otelinternal "google.golang.org/grpc/stats/opentelemetry/internal" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +func init() { + otelinternal.SetPluginOption = func(o *Options, po otelinternal.PluginOption) { + o.MetricsOptions.pluginOption = po + } +} + +var logger = grpclog.Component("otel-plugin") + +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + +var joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption) + +// Options are the options for OpenTelemetry instrumentation. +type Options struct { + // MetricsOptions are the metrics options for OpenTelemetry instrumentation. + MetricsOptions MetricsOptions +} + +// MetricsOptions are the metrics options for OpenTelemetry instrumentation. +type MetricsOptions struct { + // MeterProvider is the MeterProvider instance that will be used to create + // instruments. To enable metrics collection, set a meter provider. If + // unset, no metrics will be recorded. Any implementation knobs (i.e. views, + // bounds) set in the MeterProvider take precedence over the API calls from + // this interface. (i.e. it will create default views for unset views). + MeterProvider otelmetric.MeterProvider + + // Metrics are the metrics to instrument. Will create instrument and record telemetry + // for corresponding metric supported by the client and server + // instrumentation components if applicable. If not set, the default metrics + // will be recorded. + Metrics *estats.Metrics + + // MethodAttributeFilter is to record the method name of RPCs handled by + // grpc.UnknownServiceHandler, but take care to limit the values allowed, as + // allowing too many will increase cardinality and could cause severe memory + // or performance problems. On Client Side, pass a + // grpc.StaticMethodCallOption as a call option into Invoke or NewStream. + // This only applies for server side metrics. + MethodAttributeFilter func(string) bool + + // OptionalLabels are labels received from LB Policies that this component + // should add to metrics that record after receiving incoming metadata. + OptionalLabels []string + + // pluginOption is used to get labels to attach to certain metrics, if set. + pluginOption otelinternal.PluginOption +} + +// DialOption returns a dial option which enables OpenTelemetry instrumentation +// code for a grpc.ClientConn. +// +// Client applications interested in instrumenting their grpc.ClientConn should +// pass the dial option returned from this function as a dial option to +// grpc.NewClient(). 
+// +// For the metrics supported by this instrumentation code, specify the client +// metrics to record in metrics options. Also provide an implementation of a +// MeterProvider. If the passed in Meter Provider does not have the view +// configured for an individual metric turned on, the API call in this component +// will create a default view for that metric. +func DialOption(o Options) grpc.DialOption { + csh := &clientStatsHandler{options: o} + csh.initializeMetrics() + return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh)) +} + +var joinServerOptions = internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// ServerOption returns a server option which enables OpenTelemetry +// instrumentation code for a grpc.Server. +// +// Server applications interested in instrumenting their grpc.Server should pass +// the server option returned from this function as an argument to +// grpc.NewServer(). +// +// For the metrics supported by this instrumentation code, specify the server +// metrics to record in metrics options. Also provide an implementation of a +// MeterProvider. If the passed in Meter Provider does not have the view +// configured for an individual metric turned on, the API call in this component +// will create a default view for that metric. +func ServerOption(o Options) grpc.ServerOption { + ssh := &serverStatsHandler{options: o} + ssh.initializeMetrics() + return joinServerOptions(grpc.ChainUnaryInterceptor(ssh.unaryInterceptor), grpc.ChainStreamInterceptor(ssh.streamInterceptor), grpc.StatsHandler(ssh)) +} + +// callInfo is information pertaining to the lifespan of the RPC client side. +type callInfo struct { + target string + + method string +} + +type callInfoKey struct{} + +func setCallInfo(ctx context.Context, ci *callInfo) context.Context { + return context.WithValue(ctx, callInfoKey{}, ci) +} + +// getCallInfo returns the callInfo stored in the context, or nil +// if there isn't one. +func getCallInfo(ctx context.Context) *callInfo { + ci, _ := ctx.Value(callInfoKey{}).(*callInfo) + return ci +} + +// rpcInfo is RPC information scoped to the RPC attempt life span client side, +// and the RPC life span server side. +type rpcInfo struct { + ai *attemptInfo +} + +type rpcInfoKey struct{} + +func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context { + return context.WithValue(ctx, rpcInfoKey{}, ri) +} + +// getRPCInfo returns the rpcInfo stored in the context, or nil +// if there isn't one. +func getRPCInfo(ctx context.Context) *rpcInfo { + ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo) + return ri +} + +func removeLeadingSlash(mn string) string { + return strings.TrimLeft(mn, "/") +} + +// attemptInfo is RPC information scoped to the RPC attempt life span client +// side, and the RPC life span server side. +type attemptInfo struct { + // access these counts atomically for hedging in the future: + // number of bytes after compression (within each message) from side (client + // || server). + sentCompressedBytes int64 + // number of compressed bytes received (within each message) received on + // side (client || server). 
+ recvCompressedBytes int64 + + startTime time.Time + method string + + pluginOptionLabels map[string]string // pluginOptionLabels to attach to metrics emitted + xdsLabels map[string]string +} + +type clientMetrics struct { + // "grpc.client.attempt.started" + attemptStarted otelmetric.Int64Counter + // "grpc.client.attempt.duration" + attemptDuration otelmetric.Float64Histogram + // "grpc.client.attempt.sent_total_compressed_message_size" + attemptSentTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.client.attempt.rcvd_total_compressed_message_size" + attemptRcvdTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.client.call.duration" + callDuration otelmetric.Float64Histogram +} + +type serverMetrics struct { + // "grpc.server.call.started" + callStarted otelmetric.Int64Counter + // "grpc.server.call.sent_total_compressed_message_size" + callSentTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.server.call.rcvd_total_compressed_message_size" + callRcvdTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.server.call.duration" + callDuration otelmetric.Float64Histogram +} + +func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Counter{} + } + ret, err := meter.Int64Counter(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Counter{} + } + return ret +} + +func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Float64Counter{} + } + ret, err := meter.Float64Counter(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Float64Counter{} + } + return ret +} + +func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Histogram{} + } + ret, err := meter.Int64Histogram(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Histogram{} + } + return ret +} + +func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Float64Histogram{} + } + ret, err := meter.Float64Histogram(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Float64Histogram{} + } + return ret +} + +func createInt64Gauge(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Gauge{} + } + ret, err := meter.Int64Gauge(string(metricName), options...) 
+ if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Gauge{} + } + return ret +} + +func optionFromLabels(labelKeys []string, optionalLabelKeys []string, optionalLabels []string, labelVals ...string) otelmetric.MeasurementOption { + var attributes []otelattribute.KeyValue + + // Once it hits here lower level has guaranteed length of labelVals matches + // labelKeys + optionalLabelKeys. + for i, label := range labelKeys { + attributes = append(attributes, otelattribute.String(label, labelVals[i])) + } + + for i, label := range optionalLabelKeys { + for _, optLabel := range optionalLabels { // o(n) could build out a set but n is currently capped at < 5 + if label == optLabel { + attributes = append(attributes, otelattribute.String(label, labelVals[i+len(labelKeys)])) + } + } + } + return otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...)) +} + +// registryMetrics implements MetricsRecorder for the client and server stats +// handlers. +type registryMetrics struct { + intCounts map[*estats.MetricDescriptor]otelmetric.Int64Counter + floatCounts map[*estats.MetricDescriptor]otelmetric.Float64Counter + intHistos map[*estats.MetricDescriptor]otelmetric.Int64Histogram + floatHistos map[*estats.MetricDescriptor]otelmetric.Float64Histogram + intGauges map[*estats.MetricDescriptor]otelmetric.Int64Gauge + + optionalLabels []string +} + +func (rm *registryMetrics) registerMetrics(metrics *estats.Metrics, meter otelmetric.Meter) { + rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter) + rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter) + rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram) + rm.floatHistos = make(map[*estats.MetricDescriptor]otelmetric.Float64Histogram) + rm.intGauges = make(map[*estats.MetricDescriptor]otelmetric.Int64Gauge) + + for metric := range metrics.Metrics() { + desc := estats.DescriptorForMetric(metric) + if desc == nil { + // Either the metric was per call or the metric is not registered. + // Thus, if this component ever receives the desc as a handle in + // record it will be a no-op. 
+ continue + } + switch desc.Type { + case estats.MetricTypeIntCount: + rm.intCounts[desc] = createInt64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + case estats.MetricTypeFloatCount: + rm.floatCounts[desc] = createFloat64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + case estats.MetricTypeIntHisto: + rm.intHistos[desc] = createInt64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...)) + case estats.MetricTypeFloatHisto: + rm.floatHistos[desc] = createFloat64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...)) + case estats.MetricTypeIntGauge: + rm.intGauges[desc] = createInt64Gauge(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + } + } +} + +func (rm *registryMetrics) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ic, ok := rm.intCounts[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ic.Add(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + desc := handle.Descriptor() + if fc, ok := rm.floatCounts[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + fc.Add(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ih, ok := rm.intHistos[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ih.Record(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + desc := handle.Descriptor() + if fh, ok := rm.floatHistos[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + fh.Record(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ig, ok := rm.intGauges[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ig.Record(context.TODO(), incr, ao) + } +} + +// Users of this component should use these bucket boundaries as part of their +// SDK MeterProvider passed in. This component sends this as "advice" to the +// API, which works, however this stability is not guaranteed, so for safety the +// SDK Meter Provider provided should set these bounds for corresponding +// metrics. +var ( + // DefaultLatencyBounds are the default bounds for latency metrics. + DefaultLatencyBounds = []float64{0, 0.00001, 0.00005, 0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.008, 0.01, 0.013, 0.016, 0.02, 0.025, 0.03, 0.04, 0.05, 0.065, 0.08, 0.1, 0.13, 0.16, 0.2, 0.25, 0.3, 0.4, 0.5, 0.65, 0.8, 1, 2, 5, 10, 20, 50, 100} // provide "advice" through API, SDK should set this too + // DefaultSizeBounds are the default bounds for metrics which record size. 
+ DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + // defaultPerCallMetrics are the default metrics provided by this module. + defaultPerCallMetrics = estats.NewMetrics(ClientAttemptStarted, ClientAttemptDuration, ClientAttemptSentCompressedTotalMessageSize, ClientAttemptRcvdCompressedTotalMessageSize, ClientCallDuration, ServerCallStarted, ServerCallSentCompressedTotalMessageSize, ServerCallRcvdCompressedTotalMessageSize, ServerCallDuration) +) + +// DefaultMetrics returns a set of default OpenTelemetry metrics. +// +// This should only be invoked after init time. +func DefaultMetrics() *estats.Metrics { + return defaultPerCallMetrics.Join(estats.DefaultMetrics) +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go new file mode 100644 index 000000000..eaea559b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go @@ -0,0 +1,278 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opentelemetry + +import ( + "context" + "sync/atomic" + "time" + + "google.golang.org/grpc" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" +) + +type serverStatsHandler struct { + estats.MetricsRecorder + options Options + serverMetrics serverMetrics +} + +func (h *serverStatsHandler) initializeMetrics() { + // Will set no metrics to record, logically making this stats handler a + // no-op. 
+ if h.options.MetricsOptions.MeterProvider == nil { + return + } + + meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version)) + if meter == nil { + return + } + metrics := h.options.MetricsOptions.Metrics + if metrics == nil { + metrics = DefaultMetrics() + } + + h.serverMetrics.callStarted = createInt64Counter(metrics.Metrics(), "grpc.server.call.started", meter, otelmetric.WithUnit("call"), otelmetric.WithDescription("Number of server calls started.")) + h.serverMetrics.callSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.serverMetrics.callRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.serverMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.server.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a call from server transport's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + + rm := ®istryMetrics{ + optionalLabels: h.options.MetricsOptions.OptionalLabels, + } + h.MetricsRecorder = rm + rm.registerMetrics(metrics, meter) +} + +// attachLabelsTransportStream intercepts SetHeader and SendHeader calls of the +// underlying ServerTransportStream to attach metadataExchangeLabels. +type attachLabelsTransportStream struct { + grpc.ServerTransportStream + + attachedLabels atomic.Bool + metadataExchangeLabels metadata.MD +} + +func (s *attachLabelsTransportStream) SetHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerTransportStream.SetHeader(s.metadataExchangeLabels) + } + return s.ServerTransportStream.SetHeader(md) +} + +func (s *attachLabelsTransportStream) SendHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerTransportStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerTransportStream.SendHeader(md) +} + +func (h *serverStatsHandler) unaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + var metadataExchangeLabels metadata.MD + if h.options.MetricsOptions.pluginOption != nil { + metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata() + } + + sts := grpc.ServerTransportStreamFromContext(ctx) + + alts := &attachLabelsTransportStream{ + ServerTransportStream: sts, + metadataExchangeLabels: metadataExchangeLabels, + } + ctx = grpc.NewContextWithServerTransportStream(ctx, alts) + + res, err := handler(ctx, req) + if err != nil { // maybe trailers-only if headers haven't already been sent + if !alts.attachedLabels.Swap(true) { + alts.SetTrailer(alts.metadataExchangeLabels) + } + } else { // headers will be written; a message was sent + if !alts.attachedLabels.Swap(true) { + alts.SetHeader(alts.metadataExchangeLabels) + } + } + + return res, err +} + +// attachLabelsStream embeds a grpc.ServerStream, and intercepts the +// SetHeader/SendHeader/SendMsg/SendTrailer call to attach metadata exchange +// labels. 
+type attachLabelsStream struct { + grpc.ServerStream + + attachedLabels atomic.Bool + metadataExchangeLabels metadata.MD +} + +func (s *attachLabelsStream) SetHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerStream.SetHeader(md) +} + +func (s *attachLabelsStream) SendHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerStream.SendHeader(md) +} + +func (s *attachLabelsStream) SendMsg(m any) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + return s.ServerStream.SendMsg(m) +} + +func (h *serverStatsHandler) streamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + var metadataExchangeLabels metadata.MD + if h.options.MetricsOptions.pluginOption != nil { + metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata() + } + als := &attachLabelsStream{ + ServerStream: ss, + metadataExchangeLabels: metadataExchangeLabels, + } + err := handler(srv, als) + + // Add metadata exchange labels to trailers if never sent in headers, + // irrespective of whether or not RPC failed. + if !als.attachedLabels.Load() { + als.SetTrailer(als.metadataExchangeLabels) + } + return err +} + +// TagConn exists to satisfy stats.Handler. +func (h *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (h *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC context management. +func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + method := info.FullMethodName + if h.options.MetricsOptions.MethodAttributeFilter != nil { + if !h.options.MetricsOptions.MethodAttributeFilter(method) { + method = "other" + } + } + server := internal.ServerFromContext.(func(context.Context) *grpc.Server)(ctx) + if server == nil { // Shouldn't happen, defensive programming. + logger.Error("ctx passed into server side stats handler has no grpc server ref") + method = "other" + } else { + isRegisteredMethod := internal.IsRegisteredMethod.(func(*grpc.Server, string) bool) + if !isRegisteredMethod(server, method) { + method = "other" + } + } + + ai := &attemptInfo{ + startTime: time.Now(), + method: removeLeadingSlash(method), + } + ri := &rpcInfo{ + ai: ai, + } + return setRPCInfo(ctx, ri) +} + +// HandleRPC implements per RPC tracing and stats implementation. +func (h *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + logger.Error("ctx passed into server side stats handler metrics event handling has no server call data present") + return + } + h.processRPCData(ctx, rs, ri.ai) +} + +func (h *serverStatsHandler) processRPCData(ctx context.Context, s stats.RPCStats, ai *attemptInfo) { + switch st := s.(type) { + case *stats.InHeader: + if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil { + labels := h.options.MetricsOptions.pluginOption.GetLabels(st.Header) + if labels == nil { + labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt. 
+ } + ai.pluginOptionLabels = labels + } + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ai.method), + )) + h.serverMetrics.callStarted.Add(ctx, 1, attrs) + case *stats.OutPayload: + atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength)) + case *stats.InPayload: + atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength)) + case *stats.End: + h.processRPCEnd(ctx, ai, st) + default: + } +} + +func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) { + latency := float64(time.Since(ai.startTime)) / float64(time.Second) + st := "OK" + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } + attributes := []otelattribute.KeyValue{ + otelattribute.String("grpc.method", ai.method), + otelattribute.String("grpc.status", st), + } + for k, v := range ai.pluginOptionLabels { + attributes = append(attributes, otelattribute.String(k, v)) + } + + // Allocate vararg slice once. + opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))} + h.serverMetrics.callDuration.Record(ctx, latency, opts...) + h.serverMetrics.callSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...) + h.serverMetrics.callRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...) +} + +const ( + // ServerCallStarted is the number of server calls started. + ServerCallStarted estats.Metric = "grpc.server.call.started" + // ServerCallSentCompressedTotalMessageSize is the compressed message bytes + // sent per server call. + ServerCallSentCompressedTotalMessageSize estats.Metric = "grpc.server.call.sent_total_compressed_message_size" + // ServerCallRcvdCompressedTotalMessageSize is the compressed message bytes + // received per server call. + ServerCallRcvdCompressedTotalMessageSize estats.Metric = "grpc.server.call.rcvd_total_compressed_message_size" + // ServerCallDuration is the end-to-end time taken to complete a call from + // server transport's perspective. + ServerCallDuration estats.Metric = "grpc.server.call.duration" +) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 187fbf119..a96b6a6bf 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.67.0" +const Version = "1.67.1" diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go new file mode 100644 index 000000000..ef55ff0c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package bootstrap provides the functionality to register possible options +// for aspects of the xDS client through the bootstrap file. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" +) + +// registry is a map from credential type name to Credential builder. +var registry = make(map[string]Credentials) + +// Credentials interface encapsulates a credentials.Bundle builder +// that can be used for communicating with the xDS Management server. +type Credentials interface { + // Build returns a credential bundle associated with this credential, and + // a function to cleans up additional resources associated with this bundle + // when it is no longer needed. + Build(config json.RawMessage) (credentials.Bundle, func(), error) + // Name returns the credential name associated with this credential. + Name() string +} + +// RegisterCredentials registers Credentials used for connecting to the xds +// management server. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple credentials are +// registered with the same name, the one registered last will take effect. +func RegisterCredentials(c Credentials) { + registry[c.Name()] = c +} + +// GetCredentials returns the credentials associated with a given name. +// If no credentials are registered with the name, nil will be returned. +func GetCredentials(name string) Credentials { + if c, ok := registry[name]; ok { + return c + } + + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go new file mode 100644 index 000000000..578e12789 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/xds/bootstrap/tlscreds" +) + +func init() { + RegisterCredentials(&insecureCredsBuilder{}) + RegisterCredentials(&googleDefaultCredsBuilder{}) + RegisterCredentials(&tlsCredsBuilder{}) +} + +// insecureCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates an insecure credential. +type insecureCredsBuilder struct{} + +func (i *insecureCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { + return insecure.NewBundle(), func() {}, nil +} + +func (i *insecureCredsBuilder) Name() string { + return "insecure" +} + +// tlsCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates a TLS credential. 
+type tlsCredsBuilder struct{} + +func (t *tlsCredsBuilder) Build(config json.RawMessage) (credentials.Bundle, func(), error) { + return tlscreds.NewBundle(config) +} + +func (t *tlsCredsBuilder) Name() string { + return "tls" +} + +// googleDefaultCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates a Google Default credential. +type googleDefaultCredsBuilder struct{} + +func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { + return google.NewDefaultCredentials(), func() {}, nil +} + +func (d *googleDefaultCredsBuilder) Name() string { + return "google_default" +} diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go new file mode 100644 index 000000000..3d8398a72 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package csds implements features to dump the status (xDS responses) the +// xds_client is using. +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a later +// release. +package csds + +import ( + "context" + "fmt" + "io" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +var logger = grpclog.Component("xds") + +const prefix = "[csds-server %p] " + +func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, s)) +} + +// ClientStatusDiscoveryServer provides an implementation of the Client Status +// Discovery Service (CSDS) for exposing the xDS config of a given client. See +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto. +// +// For more details about the gRPC implementation of CSDS, refer to gRPC A40 at: +// https://github.com/grpc/proposal/blob/master/A40-csds-support.md. +type ClientStatusDiscoveryServer struct { + logger *internalgrpclog.PrefixLogger +} + +// NewClientStatusDiscoveryServer returns an implementation of the CSDS server +// that can be registered on a gRPC server. +func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { + s := &ClientStatusDiscoveryServer{} + s.logger = prefixLogger(s) + s.logger.Infof("Created CSDS server") + return s, nil +} + +// StreamClientStatus implements interface ClientStatusDiscoveryServiceServer. 
+func (s *ClientStatusDiscoveryServer) StreamClientStatus(stream v3statusgrpc.ClientStatusDiscoveryService_StreamClientStatusServer) error { + for { + req, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + resp, err := s.buildClientStatusRespForReq(req) + if err != nil { + return err + } + if err := stream.Send(resp); err != nil { + return err + } + } +} + +// FetchClientStatus implements interface ClientStatusDiscoveryServiceServer. +func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + return s.buildClientStatusRespForReq(req) +} + +// buildClientStatusRespForReq fetches the status of xDS resources from the +// xdsclient, and returns the response to be sent back to the csds client. +// +// If it returns an error, the error is a status error. +func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + // Field NodeMatchers is unsupported, by design + // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. + if len(req.NodeMatchers) != 0 { + return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) + } + + return xdsclient.DumpResources(), nil +} + +// Close cleans up the resources. +func (s *ClientStatusDiscoveryServer) Close() {} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go new file mode 100644 index 000000000..936bf2da3 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package googledirectpath implements a resolver that configures xds to make +// cloud to prod directpath connection. +// +// It's a combo of DNS and xDS resolvers. It delegates to DNS if +// - not on GCE, or +// - xDS bootstrap env var is set (so this client needs to do normal xDS, not +// direct path, and clients with this scheme is not part of the xDS mesh). +package googledirectpath + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/url" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/googlecloud" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + + _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
+) + +const ( + c2pScheme = "google-c2p" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" + + tdURL = "dns:///directpath-pa.googleapis.com" + zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" + ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" + ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" + httpReqTimeout = 10 * time.Second + + logPrefix = "[google-c2p-resolver]" + dnsName, xdsName = "dns", "xds" +) + +// For overriding in unittests. +var ( + onGCE = googlecloud.OnGCE + randInt = rand.Int + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) +) + +func init() { + resolver.Register(c2pResolverBuilder{}) +} + +type c2pResolverBuilder struct{} + +func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if t.URL.Host != "" { + return nil, fmt.Errorf("google-c2p URI scheme does not support authorities") + } + + if !runDirectPath() { + // If not xDS, fallback to DNS. + t.URL.Scheme = dnsName + return resolver.Get(dnsName).Build(t, cc, opts) + } + + // Note that the following calls to getZone() and getIPv6Capable() does I/O, + // and has 10 seconds timeout each. + // + // This should be fine in most of the cases. In certain error cases, this + // could block Dial() for up to 10 seconds (each blocking call has its own + // goroutine). + zoneCh, ipv6CapableCh := make(chan string), make(chan bool) + go func() { zoneCh <- getZone(httpReqTimeout) }() + go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() + + xdsServerURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI + if xdsServerURI == "" { + xdsServerURI = tdURL + } + + nodeCfg := newNodeConfig(<-zoneCh, <-ipv6CapableCh) + xdsServerCfg := newXdsServerConfig(xdsServerURI) + authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) + + cfg := map[string]any{ + "xds_servers": []any{xdsServerCfg}, + "client_default_listener_resource_name_template": "%s", + "authorities": authoritiesCfg, + "node": nodeCfg, + } + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err) + } + if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil { + return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err) + } + + t = resolver.Target{ + URL: url.URL{ + Scheme: xdsName, + Host: c2pAuthority, + Path: t.URL.Path, + }, + } + return resolver.Get(xdsName).Build(t, cc, opts) +} + +func (b c2pResolverBuilder) Scheme() string { + return c2pScheme +} + +func newNodeConfig(zone string, ipv6Capable bool) map[string]any { + node := map[string]any{ + "id": fmt.Sprintf("C2P-%d", randInt()), + "locality": map[string]any{"zone": zone}, + } + if ipv6Capable { + node["metadata"] = map[string]any{ipv6CapableMetadataName: true} + } + return node +} + +func newAuthoritiesConfig(serverCfg map[string]any) map[string]any { + return map[string]any{ + c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}}, + } +} + +func newXdsServerConfig(uri string) map[string]any { + return map[string]any{ + "server_uri": uri, + "channel_creds": []map[string]any{{"type": "google_default"}}, + "server_features": []any{"ignore_resource_deletion"}, + } +} + +// runDirectPath returns whether this resolver should use direct path. +// +// direct path is enabled if this client is running on GCE. 
+func runDirectPath() bool { + return onGCE() +} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go b/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go new file mode 100644 index 000000000..32a28a167 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googledirectpath + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { + parsedURL, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + client := &http.Client{Timeout: timeout} + req := &http.Request{ + Method: http.MethodGet, + URL: parsedURL, + Header: http.Header{"Metadata-Flavor": {"Google"}}, + } + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed communicating with metadata server: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("metadata server returned resp with non-OK: %v", resp) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed reading from metadata server: %v", err) + } + return body, nil +} + +var ( + zone string + zoneOnce sync.Once +) + +// Defined as var to be overridden in tests. +var getZone = func(timeout time.Duration) string { + zoneOnce.Do(func() { + qualifiedZone, err := getFromMetadata(timeout, zoneURL) + if err != nil { + logger.Warningf("could not discover instance zone: %v", err) + return + } + i := bytes.LastIndexByte(qualifiedZone, '/') + if i == -1 { + logger.Warningf("could not parse zone from metadata server: %s", qualifiedZone) + return + } + zone = string(qualifiedZone[i+1:]) + }) + return zone +} + +var ( + ipv6Capable bool + ipv6CapableOnce sync.Once +) + +// Defined as var to be overridden in tests. +var getIPv6Capable = func(timeout time.Duration) bool { + ipv6CapableOnce.Do(func() { + addr, err := getFromMetadata(timeout, ipv6URL) + if err != nil { + logger.Warningf("could not discover ipv6 capability: %v", err) + return + } + if trimmedAddr := strings.TrimSpace(string(addr)); trimmedAddr == "" { + logger.Warningf("metadata server returned empty ipv6 address") + return + } + ipv6Capable = true + }) + return ipv6Capable +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go new file mode 100644 index 000000000..ff27af026 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer installs all the xds balancers. +package balancer + +import ( + _ "google.golang.org/grpc/balancer/leastrequest" // Register the least_request_experimental balancer + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // Register the outlier_detection balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer +) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go new file mode 100644 index 000000000..9a112e276 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -0,0 +1,671 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cdsbalancer implements a balancer to handle CDS responses. 
+package cdsbalancer + +import ( + "context" + "encoding/json" + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal/balancer/nop" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + cdsName = "cds_experimental" + aggregateClusterMaxDepth = 16 +) + +var ( + errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed") + errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth) + + // newChildBalancer is a helper function to build a new cluster_resolver + // balancer and will be overridden in unittests. + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) + } + // We directly pass the parent clientConn to the underlying + // cluster_resolver balancer because the cdsBalancer does not deal with + // subConns. + return builder.Build(cc, opts), nil + } + buildProvider = buildProviderFunc +) + +func init() { + balancer.Register(bb{}) +} + +// bb implements the balancer.Builder interface to help build a cdsBalancer. +// It also implements the balancer.ConfigParser interface to help parse the +// JSON service config, to be passed to the cdsBalancer. +type bb struct{} + +// Build creates a new CDS balancer with the ClientConn. +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. 
+ logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + } + + ctx, cancel := context.WithCancel(context.Background()) + hi := xdsinternal.NewHandshakeInfo(nil, nil, nil, false) + xdsHIPtr := unsafe.Pointer(hi) + b := &cdsBalancer{ + bOpts: opts, + childConfigParser: parser, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + xdsHIPtr: &xdsHIPtr, + watchers: make(map[string]*watcherState), + } + b.ccw = &ccWrapper{ + ClientConn: cc, + xdsHIPtr: b.xdsHIPtr, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + var creds credentials.TransportCredentials + switch { + case opts.DialCreds != nil: + creds = opts.DialCreds + case opts.CredsBundle != nil: + creds = opts.CredsBundle.TransportCredentials() + } + if xc, ok := creds.(interface{ UsesXDS() bool }); ok && xc.UsesXDS() { + b.xdsCredsInUse = true + } + b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) + return b +} + +// Name returns the name of balancers built by this builder. +func (bb) Name() string { + return cdsName +} + +// lbConfig represents the loadBalancingConfig section of the service config +// for the cdsBalancer. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + ClusterName string `json:"Cluster"` +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg lbConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) + } + return &cfg, nil +} + +// cdsBalancer implements a CDS based LB policy. It instantiates a +// cluster_resolver balancer to further resolve the serviceName received from +// CDS, into localities and endpoints. Implements the balancer.Balancer +// interface which is exposed to gRPC and implements the balancer.ClientConn +// interface which is exposed to the cluster_resolver balancer. +type cdsBalancer struct { + // The following fields are initialized at build time and are either + // read-only after that or provide their own synchronization, and therefore + // do not need to be guarded by a mutex. + ccw *ccWrapper // ClientConn interface passed to child LB. + bOpts balancer.BuildOptions // BuildOptions passed to child LB. + childConfigParser balancer.ConfigParser // Config parser for cluster_resolver LB policy. + logger *grpclog.PrefixLogger // Prefix logger for all logging. + xdsCredsInUse bool + + xdsHIPtr *unsafe.Pointer // Accessed atomically. + + // The serializer and its cancel func are initialized at build time, and the + // rest of the fields here are only accessed from serializer callbacks (or + // from balancer.Balancer methods, which themselves are guaranteed to be + // mutually exclusive) and hence do not need to be guarded by a mutex. + serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client. + serializerCancel context.CancelFunc // Stops the above serializer. + childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources. + watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name. + lbCfg *lbConfig // Current load balancing configuration. 
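ParseConfig above only extracts the cluster name from the loadBalancingConfig JSON. A hedged sketch of the input it expects, compilable only inside this package (e.g. in a test) with encoding/json and fmt imported; the cluster name is made up, and encoding/json matches the "Cluster" field tag case-insensitively:

func demoParseCDSConfig() {
	cfg, err := bb{}.ParseConfig(json.RawMessage(`{"cluster": "example_cluster"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.(*lbConfig).ClusterName) // example_cluster
}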
+ + // The certificate providers are cached here to that they can be closed when + // a new provider is to be created. + cachedRoot certprovider.Provider + cachedIdentity certprovider.Provider +} + +// handleSecurityConfig processes the security configuration received from the +// management server, creates appropriate certificate provider plugins, and +// updates the HandshakeInfo which is added as an address attribute in +// NewSubConn() calls. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { + // If xdsCredentials are not in use, i.e, the user did not want to get + // security configuration from an xDS server, we should not be acting on the + // received security config here. Doing so poses a security threat. + if !b.xdsCredsInUse { + return nil + } + var xdsHI *xdsinternal.HandshakeInfo + + // Security config being nil is a valid case where the management server has + // not sent any security configuration. The xdsCredentials implementation + // handles this by delegating to its fallback credentials. + if config == nil { + // We need to explicitly set the fields to nil here since this might be + // a case of switching from a good security configuration to an empty + // one where fallback credentials are to be used. + xdsHI = xdsinternal.NewHandshakeInfo(nil, nil, nil, false) + atomic.StorePointer(b.xdsHIPtr, unsafe.Pointer(xdsHI)) + return nil + + } + + // A root provider is required whether we are using TLS or mTLS. + cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs() + rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true) + if err != nil { + return err + } + + // The identity provider is only present when using mTLS. + var identityProvider certprovider.Provider + if name, cert := config.IdentityInstanceName, config.IdentityCertName; name != "" { + var err error + identityProvider, err = buildProvider(cpc, name, cert, true, false) + if err != nil { + return err + } + } + + // Close the old providers and cache the new ones. + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.cachedRoot = rootProvider + b.cachedIdentity = identityProvider + xdsHI = xdsinternal.NewHandshakeInfo(rootProvider, identityProvider, config.SubjectAltNameMatchers, false) + atomic.StorePointer(b.xdsHIPtr, unsafe.Pointer(xdsHI)) + return nil +} + +func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanceName, certName string, wantIdentity, wantRoot bool) (certprovider.Provider, error) { + cfg, ok := configs[instanceName] + if !ok { + // Defensive programming. If a resource received from the management + // server contains a certificate provider instance name that is not + // found in the bootstrap, the resource is NACKed by the xDS client. + return nil, fmt.Errorf("certificate provider instance %q not found in bootstrap file", instanceName) + } + provider, err := cfg.Build(certprovider.BuildOptions{ + CertName: certName, + WantIdentity: wantIdentity, + WantRoot: wantRoot, + }) + if err != nil { + // This error is not expected since the bootstrap process parses the + // config and makes sure that it is acceptable to the plugin. Still, it + // is possible that the plugin parses the config successfully, but its + // Build() method errors out. 
+ return nil, fmt.Errorf("xds: failed to get security plugin instance (%+v): %v", cfg, err) + } + return provider, nil +} + +// A convenience method to create a watcher for cluster `name`. It also +// registers the watch with the xDS client, and adds the newly created watcher +// to the list of watchers maintained by the LB policy. +func (b *cdsBalancer) createAndAddWatcherForCluster(name string) { + w := &clusterWatcher{ + name: name, + parent: b, + } + ws := &watcherState{ + watcher: w, + cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w), + } + b.watchers[name] = ws +} + +// UpdateClientConnState receives the serviceConfig (which contains the +// clusterName to watch for in CDS) and the xdsClient object from the +// xdsResolver. +func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + b.logger.Warningf("Received balancer config with no xDS client") + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) + + // The errors checked here should ideally never happen because the + // ServiceConfig in this case is prepared by the xdsResolver and is not + // something that is received on the wire. + lbCfg, ok := state.BalancerConfig.(*lbConfig) + if !ok { + b.logger.Warningf("Received unexpected balancer config type: %T", state.BalancerConfig) + return balancer.ErrBadResolverState + } + if lbCfg.ClusterName == "" { + b.logger.Warningf("Received balancer config with no cluster name") + return balancer.ErrBadResolverState + } + + // Do nothing and return early if configuration has not changed. + if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName { + return nil + } + b.lbCfg = lbCfg + + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + // A config update with a changed top-level cluster name means that none + // of our old watchers make any sense any more. + b.closeAllWatchers() + + // Create a new watcher for the top-level cluster. Upon resolution, it + // could end up creating more watchers if turns out to be an aggregate + // cluster. + b.createAndAddWatcherForCluster(lbCfg.ClusterName) + errCh <- nil + } + onFailure := func() { + // The call to Schedule returns false *only* if the serializer has been + // closed, which happens only when we receive an update after close. + errCh <- errBalancerClosed + } + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *cdsBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + // Resource not found error is reported by the resolver when the + // top-level cluster resource is removed by the management server. + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.closeAllWatchers() + } + var root string + if b.lbCfg != nil { + root = b.lbCfg.ClusterName + } + b.onClusterError(root, err) + }) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// Closes all registered cluster watchers and removes them from the internal map. +// +// Only executed in the context of a serializer callback. 
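The blocking hand-off in UpdateClientConnState above is the generic callback-serializer pattern from grpc's internal grpcsync package. A standalone sketch of that pattern, compilable only inside the grpc module (e.g. in a test), with context, errors and fmt imported:

func demoSerializer() {
	ctx, cancel := context.WithCancel(context.Background())
	ser := grpcsync.NewCallbackSerializer(ctx)

	errCh := make(chan error, 1)
	ser.ScheduleOr(
		func(context.Context) { errCh <- nil },                 // runs on the serializer's goroutine
		func() { errCh <- errors.New("serializer is closed") }, // runs only if scheduling fails after close
	)
	fmt.Println(<-errCh) // <nil>

	cancel()     // stop accepting new callbacks
	<-ser.Done() // wait for already-scheduled callbacks to drain
}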
+func (b *cdsBalancer) closeAllWatchers() { + for name, state := range b.watchers { + state.cancelWatch() + delete(b.watchers, name) + } +} + +// Close cancels the CDS watch, closes the child policy and closes the +// cdsBalancer. +func (b *cdsBalancer) Close() { + b.serializer.TrySchedule(func(context.Context) { + b.closeAllWatchers() + + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() +} + +func (b *cdsBalancer) ExitIdle() { + b.serializer.TrySchedule(func(context.Context) { + if b.childLB == nil { + b.logger.Warningf("Received ExitIdle with no child policy") + return + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + }) +} + +// Handles a good Cluster update from the xDS client. Kicks off the discovery +// mechanism generation process from the top-level cluster and if the cluster +// graph is resolved, generates child policy config and pushes it down. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpdate) { + state := b.watchers[name] + if state == nil { + // We are currently not watching this cluster anymore. Return early. + return + } + + b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update)) + + // Update the watchers map with the update for the cluster. + state.lastUpdate = &update + + // For an aggregate cluster, always use the security configuration on the + // root cluster. + if name == b.lbCfg.ClusterName { + // Process the security config from the received update before building the + // child policy or forwarding the update to it. We do this because the child + // policy may try to create a new subConn inline. Processing the security + // configuration here and setting up the handshakeInfo will make sure that + // such attempts are handled properly. + if err := b.handleSecurityConfig(update.SecurityCfg); err != nil { + // If the security config is invalid, for example, if the provider + // instance is not found in the bootstrap config, we need to put the + // channel in transient failure. + b.onClusterError(name, fmt.Errorf("received Cluster resource contains invalid security config: %v", err)) + return + } + } + + clustersSeen := make(map[string]bool) + dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen) + if err != nil { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("failed to generate discovery mechanisms: %v", err)) + return + } + if ok { + if len(dms) == 0 { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("aggregate cluster graph has no leaf clusters")) + return + } + // Child policy is built the first time we resolve the cluster graph. 
+		if b.childLB == nil {
+			childLB, err := newChildBalancer(b.ccw, b.bOpts)
+			if err != nil {
+				b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err)
+				return
+			}
+			b.childLB = childLB
+			b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name)
+		}
+
+		// Prepare the child policy configuration, convert it to JSON, have it
+		// parsed by the child policy to convert it into service config and push
+		// an update to it.
+		childCfg := &clusterresolver.LBConfig{
+			DiscoveryMechanisms: dms,
+			// The LB policy is configured by the root cluster.
+			XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy,
+		}
+		cfgJSON, err := json.Marshal(childCfg)
+		if err != nil {
+			// Shouldn't happen, since we just prepared the struct.
+			b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg)
+			return
+		}
+
+		var sc serviceconfig.LoadBalancingConfig
+		if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil {
+			b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err)
+			return
+		}
+
+		ccState := balancer.ClientConnState{
+			ResolverState:  xdsclient.SetClient(resolver.State{}, b.xdsClient),
+			BalancerConfig: sc,
+		}
+		if err := b.childLB.UpdateClientConnState(ccState); err != nil {
+			b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err)
+		}
+	}
+
+	// We no longer need the clusters that we did not see in this iteration of
+	// generateDMsForCluster().
+	for cluster, ws := range b.watchers {
+		if clustersSeen[cluster] {
+			continue
+		}
+		ws.cancelWatch()
+		delete(b.watchers, cluster)
+	}
+}
+
+// Handles an error Cluster update from the xDS client. Propagates the error
+// down to the child policy if one exists, or puts the channel in
+// TRANSIENT_FAILURE.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) onClusterError(name string, err error) {
+	b.logger.Warningf("Cluster resource %q received error update: %v", name, err)
+
+	if b.childLB != nil {
+		if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection {
+			// Connection errors will be sent to the child balancers directly.
+			// There's no need to forward them.
+			b.childLB.ResolverError(err)
+		}
+	} else {
+		// If child balancer was never created, fail the RPCs with
+		// errors.
+		b.ccw.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            base.NewErrPicker(fmt.Errorf("%q: %v", name, err)),
+		})
+	}
+}
+
+// Handles a resource-not-found error from the xDS client. Propagates the error
+// down to the child policy if one exists, or puts the channel in
+// TRANSIENT_FAILURE.
+//
+// Only executed in the context of a serializer callback.
+func (b *cdsBalancer) onClusterResourceNotFound(name string) {
+	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", name)
+	if b.childLB != nil {
+		b.childLB.ResolverError(err)
+	} else {
+		// If child balancer was never created, fail the RPCs with errors.
+		b.ccw.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            base.NewErrPicker(err),
+		})
+	}
+}
+
+// Generates discovery mechanisms for the cluster graph rooted at `name`. This
+// method is called recursively if `name` corresponds to an aggregate cluster,
+// with the base case for recursion being a leaf cluster.
If a new cluster is +// encountered when traversing the graph, a watcher is created for it. +// +// Inputs: +// - name: name of the cluster to start from +// - depth: recursion depth of the current cluster, starting from root +// - dms: prioritized list of current discovery mechanisms +// - clustersSeen: cluster names seen so far in the graph traversal +// +// Outputs: +// - new prioritized list of discovery mechanisms +// - boolean indicating if traversal of the aggregate cluster graph is +// complete. If false, the above list of discovery mechanisms is ignored. +// - error indicating if any error was encountered as part of the graph +// traversal. If error is non-nil, the other return values are ignored. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) { + if depth >= aggregateClusterMaxDepth { + return dms, false, errExceedsMaxDepth + } + + if clustersSeen[name] { + // Discovery mechanism already seen through a different branch. + return dms, true, nil + } + clustersSeen[name] = true + + state, ok := b.watchers[name] + if !ok { + // If we have not seen this cluster so far, create a watcher for it, add + // it to the map, start the watch and return. + b.createAndAddWatcherForCluster(name) + + // And since we just created the watcher, we know that we haven't + // resolved the cluster graph yet. + return dms, false, nil + } + + // A watcher exists, but no update has been received yet. + if state.lastUpdate == nil { + return dms, false, nil + } + + var dm clusterresolver.DiscoveryMechanism + cluster := state.lastUpdate + switch cluster.ClusterType { + case xdsresource.ClusterTypeAggregate: + // This boolean is used to track if any of the clusters in the graph is + // not yet completely resolved or returns errors, thereby allowing us to + // traverse as much of the graph as possible (and start the associated + // watches where required) to ensure that clustersSeen contains all + // clusters in the graph that we can traverse to. + missingCluster := false + var err error + for _, child := range cluster.PrioritizedClusterNames { + var ok bool + dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen) + if err != nil || !ok { + missingCluster = true + } + } + return dms, !missingCluster, err + case xdsresource.ClusterTypeEDS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cluster.ClusterName, + EDSServiceName: cluster.EDSServiceName, + MaxConcurrentRequests: cluster.MaxRequests, + LoadReportingServer: cluster.LRSServerConfig, + } + case xdsresource.ClusterTypeLogicalDNS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cluster.ClusterName, + DNSHostname: cluster.DNSHostName, + } + } + odJSON := cluster.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. 
+ odJSON = json.RawMessage(`{}`) + } + dm.OutlierDetection = odJSON + + dm.TelemetryLabels = cluster.TelemetryLabels + + return append(dms, dm), true, nil +} + +// ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at +// creation and intercepts the NewSubConn() and UpdateAddresses() call from the +// child policy to add security configuration required by xDS credentials. +// +// Other methods of the balancer.ClientConn interface are not overridden and +// hence get the original implementation. +type ccWrapper struct { + balancer.ClientConn + + xdsHIPtr *unsafe.Pointer +} + +// NewSubConn intercepts NewSubConn() calls from the child policy and adds an +// address attribute which provides all information required by the xdsCreds +// handshaker to perform the TLS handshake. +func (ccw *ccWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeInfo(addr, ccw.xdsHIPtr) + } + + // No need to override opts.StateListener; just forward all calls to the + // child that created the SubConn. + return ccw.ClientConn.NewSubConn(newAddrs, opts) +} + +func (ccw *ccWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeInfo(addr, ccw.xdsHIPtr) + } + ccw.ClientConn.UpdateAddresses(sc, newAddrs) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go new file mode 100644 index 000000000..835461d09 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -0,0 +1,55 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is +// passed to the xDS client as part of the WatchResource() API. +// +// It watches a single cluster and handles callbacks from the xDS client by +// scheduling them on the parent LB policy's serializer. 
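To make the traversal in generateDMsForCluster above concrete, here is a hedged sketch of the discovery mechanisms a small aggregate graph would produce once updates for all three clusters have arrived; the cluster names and hostname are made up:

// Hypothetical graph: root_agg (AGGREGATE) -> [eds_a (EDS), dns_b (LOGICAL_DNS)].
// A traversal rooted at "root_agg" yields the leaf mechanisms in priority order:
dms := []clusterresolver.DiscoveryMechanism{
	{Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: "eds_a"},
	{Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, Cluster: "dns_b", DNSHostname: "backend.example.com:443"},
}
_ = dms // leaf clusters appear in the order the aggregate cluster lists them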
+type clusterWatcher struct { + name string + parent *cdsBalancer +} + +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone() } + cw.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone() } + cw.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone() } + cw.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +// watcherState groups the state associated with a clusterWatcher. +type watcherState struct { + watcher *clusterWatcher // The underlying watcher. + cancelWatch func() // Cancel func to cancel the watch. + lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster. +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go new file mode 100644 index 000000000..e179bb1bf --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package cdsbalancer + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[cds-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *cdsBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go new file mode 100644 index 000000000..0dc71dfed --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -0,0 +1,480 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterimpl implements the xds_cluster_impl balancing policy. It +// handles the cluster features (e.g. circuit_breaking, RPC dropping). 
+// +// Note that it doesn't handle name resolution, which is done by policy +// xds_cluster_resolver. +package clusterimpl + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/loadstore" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +const ( + // Name is the name of the cluster_impl balancer. + Name = "xds_cluster_impl_experimental" + defaultRequestCountMax = 1024 +) + +var ( + connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) + errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + ctx, cancel := context.WithCancel(context.Background()) + b := &clusterImplBalancer{ + ClientConn: cc, + bOpts: bOpts, + loadWrapper: loadstore.NewWrapper(), + requestCountMax: defaultRequestCountMax, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + b.logger = prefixLogger(b) + b.child = gracefulswitch.NewBalancer(b, bOpts) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type clusterImplBalancer struct { + balancer.ClientConn + + bOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + xdsClient xdsclient.XDSClient + + config *LBConfig + child *gracefulswitch.Balancer + cancelLoadReport func() + edsServiceName string + lrsServer *bootstrap.ServerConfig + loadWrapper *loadstore.Wrapper + + clusterNameMu sync.Mutex + clusterName string + + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // childState/drops/requestCounter keeps the state used by the most recently + // generated picker. + childState balancer.State + dropCategories []DropConfig // The categories for drops. + drops []*dropper + requestCounterCluster string // The cluster name for the request counter. + requestCounterService string // The service name for the request counter. + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 + telemetryLabels map[string]string +} + +// updateLoadStore checks the config for load store, and decides whether it +// needs to restart the load reporting stream. +func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { + var updateLoadClusterAndService bool + + // ClusterName is different, restart. ClusterName is from ClusterName and + // EDSServiceName. 
+ clusterName := b.getClusterName() + if clusterName != newConfig.Cluster { + updateLoadClusterAndService = true + b.setClusterName(newConfig.Cluster) + clusterName = newConfig.Cluster + } + if b.edsServiceName != newConfig.EDSServiceName { + updateLoadClusterAndService = true + b.edsServiceName = newConfig.EDSServiceName + } + if updateLoadClusterAndService { + // This updates the clusterName and serviceName that will be reported + // for the loads. The update here is too early, the perfect timing is + // when the picker is updated with the new connection. But from this + // balancer's point of view, it's impossible to tell. + // + // On the other hand, this will almost never happen. Each LRS policy + // shouldn't get updated config. The parent should do a graceful switch + // when the clusterName or serviceName is changed. + b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) + } + + var ( + stopOldLoadReport bool + startNewLoadReport bool + ) + + // Check if it's necessary to restart load report. + if b.lrsServer == nil { + if newConfig.LoadReportingServer != nil { + // Old is nil, new is not nil, start new LRS. + b.lrsServer = newConfig.LoadReportingServer + startNewLoadReport = true + } + // Old is nil, new is nil, do nothing. + } else if newConfig.LoadReportingServer == nil { + // Old is not nil, new is nil, stop old, don't start new. + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + } else { + // Old is not nil, new is not nil, compare string values, if + // different, stop old and start new. + if !b.lrsServer.Equal(newConfig.LoadReportingServer) { + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + startNewLoadReport = true + } + } + + if stopOldLoadReport { + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + if !startNewLoadReport { + // If a new LRS stream will be started later, no need to update + // it to nil here. + b.loadWrapper.UpdateLoadStore(nil) + } + } + } + if startNewLoadReport { + var loadStore *load.Store + if b.xdsClient != nil { + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServer) + } + b.loadWrapper.UpdateLoadStore(loadStore) + } + + return nil +} + +func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { + if b.logger.V(2) { + b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // Need to check for potential errors at the beginning of this function, so + // that on errors, we reject the whole config, instead of applying part of + // it. + bb := balancer.Get(newConfig.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("child policy %q not registered", newConfig.ChildPolicy.Name) + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + + // Update load reporting config. This needs to be done before updating the + // child policy because we need the loadStore from the updated client to be + // passed to the ccWrapper, so that the next picker from the child policy + // will pick up the new loadStore. 
+ if err := b.updateLoadStore(newConfig); err != nil { + return err + } + + if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { + if err := b.child.SwitchTo(bb); err != nil { + return fmt.Errorf("error switching to child of type %q: %v", newConfig.ChildPolicy.Name, err) + } + } + b.config = newConfig + + b.telemetryLabels = newConfig.TelemetryLabels + dc := b.handleDropAndRequestCount(newConfig) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(dc), + }) + } + + // Addresses and sub-balancer config are sent to sub-balancer. + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.config.ChildPolicy.Config, + }) +} + +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + errCh <- b.updateClientConnState(s) + } + onFailure := func() { + // An attempt to schedule callback fails only when an update is received + // after Close(). + errCh <- errBalancerClosed + } + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh +} + +func (b *clusterImplBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + b.child.ResolverError(err) + }) +} + +func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { + // Trigger re-resolution when a SubConn turns transient failure. This is + // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. + // + // Note that this happens not only for the addresses from DNS, but also for + // EDS (cluster_impl doesn't know if it's DNS or EDS, only the parent + // knows). The parent priority policy is configured to ignore re-resolution + // signal from the EDS children. + if s.ConnectivityState == connectivity.TransientFailure { + b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + } + + if cb != nil { + cb(s) + } +} + +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, s) +} + +func (b *clusterImplBalancer) Close() { + b.serializer.TrySchedule(func(_ context.Context) { + b.child.Close() + b.childState = balancer.State{} + + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() +} + +func (b *clusterImplBalancer) ExitIdle() { + b.serializer.TrySchedule(func(context.Context) { + b.child.ExitIdle() + }) +} + +// Override methods to accept updates from the child LB. 
+ +func (b *clusterImplBalancer) UpdateState(state balancer.State) { + b.serializer.TrySchedule(func(context.Context) { + b.childState = state + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(&dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }), + }) + }) +} + +func (b *clusterImplBalancer) setClusterName(n string) { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + b.clusterName = n +} + +func (b *clusterImplBalancer) getClusterName() string { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + return b.clusterName +} + +// scWrapper is a wrapper of SubConn with locality ID. The locality ID can be +// retrieved from the addresses when creating SubConn. +// +// All SubConns passed to the child policies are wrapped in this, so that the +// picker can get the localityID from the picked SubConn, and do load reporting. +// +// After wrapping, all SubConns to and from the parent ClientConn (e.g. for +// SubConn state update, update/remove SubConn) must be the original SubConns. +// All SubConns to and from the child policy (NewSubConn, forwarding SubConn +// state update) must be the wrapper. The balancer keeps a map from the original +// SubConn to the wrapper for this purpose. +type scWrapper struct { + balancer.SubConn + // locality needs to be atomic because it can be updated while being read by + // the picker. + locality atomic.Value // type xdsinternal.LocalityID +} + +func (scw *scWrapper) updateLocalityID(lID xdsinternal.LocalityID) { + scw.locality.Store(lID) +} + +func (scw *scWrapper) localityID() xdsinternal.LocalityID { + lID, _ := scw.locality.Load().(xdsinternal.LocalityID) + return lID +} + +func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) + } + var sc balancer.SubConn + scw := &scWrapper{} + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { + b.serializer.TrySchedule(func(context.Context) { + b.updateSubConnState(sc, state, oldListener) + if state.ConnectivityState != connectivity.Ready { + return + } + // Read connected address and call updateLocalityID() based on the connected + // address's locality. 
https://github.com/grpc/grpc-go/issues/7339 + addr := connectedAddress(state) + lID := xdsinternal.GetLocalityID(addr) + if lID.Empty() { + if b.logger.V(2) { + b.logger.Infof("Locality ID for %s unexpectedly empty", addr) + } + return + } + scw.updateLocalityID(lID) + }) + } + sc, err := b.ClientConn.NewSubConn(newAddrs, opts) + if err != nil { + return nil, err + } + scw.SubConn = sc + return scw, nil +} + +func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID + for i, addr := range addrs { + newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + if scw, ok := sc.(*scWrapper); ok { + scw.updateLocalityID(lID) + // Need to get the original SubConn from the wrapper before calling + // parent ClientConn. + sc = scw.SubConn + } + b.ClientConn.UpdateAddresses(sc, newAddrs) +} + +type dropConfigs struct { + drops []*dropper + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 +} + +// handleDropAndRequestCount compares drop and request counter in newConfig with +// the one currently used by picker. It returns a new dropConfigs if a new +// picker needs to be generated, otherwise it returns nil. +func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { + // Compare new drop config. And update picker if it's changed. + var updatePicker bool + if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { + b.dropCategories = newConfig.DropCategories + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + for _, c := range newConfig.DropCategories { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true + } + + // Compare cluster name. And update picker if it's changed, because circuit + // breaking's stream counter will be different. + if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { + b.requestCounterCluster = newConfig.Cluster + b.requestCounterService = newConfig.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) + updatePicker = true + } + // Compare upper bound of stream count. And update picker if it's changed. + // This is also for circuit breaking. + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + if !updatePicker { + return nil + } + return &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go new file mode 100644 index 000000000..134a568e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/serviceconfig" +) + +// DropConfig contains the category, and drop ratio. +type DropConfig struct { + Category string + RequestsPerMillion uint32 +} + +// LBConfig is the balancer config for cluster_impl balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + // TelemetryLabels are the telemetry Labels associated with this cluster. + TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +func equalDropCategories(a, b []DropConfig) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go new file mode 100644 index 000000000..3bbd1b0d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
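For reference, a hypothetical cluster_impl configuration (every value made up) in the shape parseConfig above accepts. This sketch would only compile inside this package with encoding/json and fmt imported, and it assumes the named child policy is registered in the binary:

func demoClusterImplConfig() {
	raw := json.RawMessage(`{
		"cluster": "example_cluster",
		"edsServiceName": "example_eds_service",
		"maxConcurrentRequests": 512,
		"dropCategories": [{"Category": "throttle", "RequestsPerMillion": 100000}],
		"childPolicy": [{"round_robin": {}}]
	}`)
	cfg, err := parseConfig(raw)
	if err != nil {
		panic(err) // e.g. if "round_robin" is not registered in this binary
	}
	fmt.Println(cfg.Cluster, *cfg.MaxConcurrentRequests) // example_cluster 512
}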
+ * + */ + +package clusterimpl + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-cluster-impl-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clusterImplBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go new file mode 100644 index 000000000..fbadbb92b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "context" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// NewRandomWRR is used when calculating drops. It's exported so that tests can +// override it. +var NewRandomWRR = wrr.NewRandom + +const million = 1000000 + +type dropper struct { + category string + w wrr.WRR +} + +// greatest common divisor (GCD) via Euclidean algorithm +func gcd(a, b uint32) uint32 { + for b != 0 { + t := b + b = a % b + a = t + } + return a +} + +func newDropper(c DropConfig) *dropper { + w := NewRandomWRR() + gcdv := gcd(c.RequestsPerMillion, million) + // Return true for RequestPerMillion, false for the rest. + w.Add(true, int64(c.RequestsPerMillion/gcdv)) + w.Add(false, int64((million-c.RequestsPerMillion)/gcdv)) + + return &dropper{ + category: c.Category, + w: w, + } +} + +func (d *dropper) drop() (ret bool) { + return d.w.Next().(bool) +} + +// loadReporter wraps the methods from the loadStore that are used here. +type loadReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) + CallDropped(locality string) +} + +// Picker implements RPC drop, circuit breaking drop and load reporting. 
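The drop ratio is encoded as two WRR entries whose weights are reduced by their greatest common divisor. A quick in-package check of that arithmetic (the 10% rate is an arbitrary example; gcd and million are the package-level helpers defined above):

perMillion := uint32(100_000)                     // 10% of requests
g := gcd(perMillion, million)                     // == 100000
fmt.Println(perMillion/g, (million-perMillion)/g) // 1 9 -> w.Add(true, 1); w.Add(false, 9)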
+type picker struct { + drops []*dropper + s balancer.State + loadStore loadReporter + counter *xdsclient.ClusterRequestsCounter + countMax uint32 + telemetryLabels map[string]string +} + +func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { + return &picker{ + drops: config.drops, + s: b.childState, + loadStore: b.loadWrapper, + counter: config.requestCounter, + countMax: config.requestCountMax, + telemetryLabels: b.telemetryLabels, + } +} + +func telemetryLabels(ctx context.Context) map[string]string { + if ctx == nil { + return nil + } + labels := stats.GetLabels(ctx) + if labels == nil { + return nil + } + return labels.TelemetryLabels +} + +func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Unconditionally set labels if present, even dropped or queued RPC's can + // use these labels. + if labels := telemetryLabels(info.Ctx); labels != nil { + for key, value := range d.telemetryLabels { + labels[key] = value + } + } + + // Don't drop unless the inner picker is READY. Similar to + // https://github.com/grpc/grpc-go/issues/2622. + if d.s.ConnectivityState == connectivity.Ready { + // Check if this RPC should be dropped by category. + for _, dp := range d.drops { + if dp.drop() { + if d.loadStore != nil { + d.loadStore.CallDropped(dp.category) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") + } + } + } + + // Check if this RPC should be dropped by circuit breaking. + if d.counter != nil { + if err := d.counter.StartRequest(d.countMax); err != nil { + // Drops by circuit breaking are reported with empty category. They + // will be reported only in total drops, but not in per category. + if d.loadStore != nil { + d.loadStore.CallDropped("") + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) + } + } + + var lIDStr string + pr, err := d.s.Picker.Pick(info) + if scw, ok := pr.SubConn.(*scWrapper); ok { + // This OK check also covers the case err!=nil, because SubConn will be + // nil. + pr.SubConn = scw.SubConn + var e error + // If locality ID isn't found in the wrapper, an empty locality ID will + // be used. + lIDStr, e = scw.localityID().ToString() + if e != nil { + logger.Infof("failed to marshal LocalityID: %#v, loads won't be reported", scw.localityID()) + } + } + + if err != nil { + if d.counter != nil { + // Release one request count if this pick fails. + d.counter.EndRequest() + } + return pr, err + } + + if labels := telemetryLabels(info.Ctx); labels != nil { + labels["grpc.lb.locality"] = lIDStr + } + + if d.loadStore != nil { + d.loadStore.CallStarted(lIDStr) + oldDone := pr.Done + pr.Done = func(info balancer.DoneInfo) { + if oldDone != nil { + oldDone(info) + } + d.loadStore.CallFinished(lIDStr, info.Err) + + load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) + if !ok || load == nil { + return + } + for n, c := range load.NamedMetrics { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + } + } + + if d.counter != nil { + // Update Done() so that when the RPC finishes, the request count will + // be released. 
+ oldDone := pr.Done + pr.Done = func(doneInfo balancer.DoneInfo) { + d.counter.EndRequest() + if oldDone != nil { + oldDone(doneInfo) + } + } + } + + return pr, err +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go new file mode 100644 index 000000000..92c69f5e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" +) + +type subBalancerState struct { + state balancer.State + // stateToAggregate is the connectivity state used only for state + // aggregation. It could be different from state.ConnectivityState. For + // example when a sub-balancer transitions from TransientFailure to + // connecting, state.ConnectivityState is Connecting, but stateToAggregate + // is still TransientFailure. + stateToAggregate connectivity.State +} + +func (s *subBalancerState) String() string { + return fmt.Sprintf("picker:%p,state:%v,stateToAggregate:%v", s.state.Picker, s.state.ConnectivityState, s.stateToAggregate) +} + +type balancerStateAggregator struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + csEval *balancer.ConnectivityStateEvaluator + + mu sync.Mutex + // This field is used to ensure that no updates are forwarded to the parent + // CC once the aggregator is closed. A closed sub-balancer could still send + // pickers to this aggregator. + closed bool + // Map from child policy name to last reported state. + idToPickerState map[string]*subBalancerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool +} + +func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *balancerStateAggregator { + return &balancerStateAggregator{ + cc: cc, + logger: logger, + csEval: &balancer.ConnectivityStateEvaluator{}, + idToPickerState: make(map[string]*subBalancerState), + } +} + +func (bsa *balancerStateAggregator) close() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.closed = true +} + +// add adds a sub-balancer in CONNECTING state. +// +// This is called when there's a new child. +func (bsa *balancerStateAggregator) add(id string) { + bsa.mu.Lock() + defer bsa.mu.Unlock() + + bsa.idToPickerState[id] = &subBalancerState{ + // Start everything in CONNECTING, so if one of the sub-balancers + // reports TransientFailure, the RPCs will still wait for the other + // sub-balancers. 
+ state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + stateToAggregate: connectivity.Connecting, + } + bsa.csEval.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + bsa.buildAndUpdateLocked() +} + +// remove removes the sub-balancer state. Future updates from this sub-balancer, +// if any, will be ignored. +// +// This is called when a child is removed. +func (bsa *balancerStateAggregator) remove(id string) { + bsa.mu.Lock() + defer bsa.mu.Unlock() + if _, ok := bsa.idToPickerState[id]; !ok { + return + } + // Setting the state of the deleted sub-balancer to Shutdown will get + // csEvltr to remove the previous state for any aggregated state + // evaluations. Transitions to and from connectivity.Shutdown are ignored + // by csEvltr. + bsa.csEval.RecordTransition(bsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) + // Remove id and picker from picker map. This also results in future updates + // for this ID to be ignored. + delete(bsa.idToPickerState, id) + bsa.buildAndUpdateLocked() +} + +// pauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (bsa *balancerStateAggregator) pauseStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = true + bsa.needUpdateStateOnResume = false +} + +// resumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (bsa *balancerStateAggregator) resumeStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = false + if bsa.needUpdateStateOnResume { + bsa.cc.UpdateState(bsa.buildLocked()) + } +} + +// UpdateState is called to report a balancer state change from sub-balancer. +// It's usually called by the balancer group. +// +// It calls parent ClientConn's UpdateState with the new aggregated state. +func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) { + bsa.logger.Infof("State update from sub-balancer %q: %+v", id, state) + + bsa.mu.Lock() + defer bsa.mu.Unlock() + pickerSt, ok := bsa.idToPickerState[id] + if !ok { + // All state starts with an entry in pickStateMap. If ID is not in map, + // it's either removed, or never existed. + return + } + if !(pickerSt.state.ConnectivityState == connectivity.TransientFailure && state.ConnectivityState == connectivity.Connecting) { + // If old state is TransientFailure, and new state is Connecting, don't + // update the state, to prevent the aggregated state from being always + // CONNECTING. Otherwise, stateToAggregate is the same as + // state.ConnectivityState. + bsa.csEval.RecordTransition(pickerSt.stateToAggregate, state.ConnectivityState) + pickerSt.stateToAggregate = state.ConnectivityState + } + pickerSt.state = state + bsa.buildAndUpdateLocked() +} + +// buildAndUpdateLocked combines the sub-state from each sub-balancer into one +// state, and sends a picker update to the parent ClientConn. +func (bsa *balancerStateAggregator) buildAndUpdateLocked() { + if bsa.closed { + return + } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } + bsa.cc.UpdateState(bsa.buildLocked()) +} + +// buildLocked combines sub-states into one. 
+func (bsa *balancerStateAggregator) buildLocked() balancer.State { + // The picker's return error might not be consistent with the + // aggregatedState. Because for this LB policy, we want to always build + // picker with all sub-pickers (not only ready sub-pickers), so even if the + // overall state is Ready, pick for certain RPCs can behave like Connecting + // or TransientFailure. + bsa.logger.Infof("Child pickers: %+v", bsa.idToPickerState) + return balancer.State{ + ConnectivityState: bsa.csEval.CurrentState(), + Picker: newPickerGroup(bsa.idToPickerState), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go new file mode 100644 index 000000000..e6d751ecb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -0,0 +1,205 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clustermanager implements the cluster manager LB policy for xds. +package clustermanager + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancergroup" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const balancerName = "xds_cluster_manager_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + b := &bal{} + b.logger = prefixLogger(b) + b.stateAggregator = newBalancerStateAggregator(cc, b.logger) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: opts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + b.bg.Start() + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return balancerName +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type bal struct { + logger *internalgrpclog.PrefixLogger + bg *balancergroup.BalancerGroup + stateAggregator *balancerStateAggregator + + children map[string]childConfig +} + +func (b *bal) setErrorPickerForChild(childName string, err error) { + b.stateAggregator.UpdateState(childName, balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) error { + // TODO: Get rid of handling hierarchy in addresses. This LB policy never + // gets addresses from the resolver. 
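updateChildren below splits the flat address list by each address's hierarchical path, so that every child policy only sees its own addresses. The hierarchy package lives under google.golang.org/grpc/internal and cannot be imported from other modules, so the following is a self-contained sketch of the same grouping idea with an illustrative address type:

package main

import "fmt"

// addr is an illustrative stand-in for resolver.Address plus its hierarchy
// path attribute (e.g. []string{"cluster-a", "locality-1"}).
type addr struct {
	address string
	path    []string
}

// group buckets addresses by the first path element and strips that element,
// mirroring what hierarchy.Group does for the cluster manager's children.
func group(addrs []addr) map[string][]addr {
	out := make(map[string][]addr)
	for _, a := range addrs {
		if len(a.path) == 0 {
			continue
		}
		child := a.path[0]
		a.path = a.path[1:]
		out[child] = append(out[child], a)
	}
	return out
}

func main() {
	split := group([]addr{
		{address: "10.0.0.1:80", path: []string{"cluster-a"}},
		{address: "10.0.0.2:80", path: []string{"cluster-b"}},
	})
	fmt.Println(len(split["cluster-a"]), len(split["cluster-b"])) // 1 1
}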
+ addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + // Remove sub-balancers that are not in the new list from the aggregator and + // balancergroup. + for name := range b.children { + if _, ok := newConfig.Children[name]; !ok { + b.stateAggregator.remove(name) + b.bg.Remove(name) + } + } + + var retErr error + for childName, childCfg := range newConfig.Children { + lbCfg := childCfg.ChildPolicy.Config + if _, ok := b.children[childName]; !ok { + // Add new sub-balancers to the aggregator and balancergroup. + b.stateAggregator.add(childName) + b.bg.Add(childName, balancer.Get(childCfg.ChildPolicy.Name)) + } else { + // If the child policy type has changed for existing sub-balancers, + // parse the new config and send down the config update to the + // balancergroup, which will take care of gracefully switching the + // child over to the new policy. + // + // If we run into errors here, we need to ensure that RPCs to this + // child fail, while RPCs to other children with good configs + // continue to succeed. + newPolicyName, oldPolicyName := childCfg.ChildPolicy.Name, b.children[childName].ChildPolicy.Name + if newPolicyName != oldPolicyName { + var err error + var cfgJSON []byte + cfgJSON, err = childCfg.ChildPolicy.MarshalJSON() + if err != nil { + retErr = fmt.Errorf("failed to JSON marshal load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } + // This overwrites lbCfg to be in the format expected by the + // gracefulswitch balancer. So, when this config is pushed to + // the child (below), it will result in a graceful switch to the + // new child policy. + lbCfg, err = balancergroup.ParseConfig(cfgJSON) + if err != nil { + retErr = fmt.Errorf("failed to parse load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } + } + } + + if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addressesSplit[childName], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }, + BalancerConfig: lbCfg, + }); err != nil { + retErr = fmt.Errorf("failed to push new configuration %v to child %q", childCfg.ChildPolicy.Config, childName) + b.setErrorPickerForChild(childName, retErr) + } + + // Picker update is sent to the parent ClientConn only after the + // new child policy returns a picker. So, there is no need to + // set needUpdateStateOnResume to true here. + } + + b.children = newConfig.Children + + // If multiple sub-balancers run into errors, we will return only the last + // one, which is still good enough, since the grpc channel will anyways + // return this error as balancer.ErrBadResolver to the name resolver, + // resulting in re-resolution attempts. + return retErr + + // Adding or removing a sub-balancer will result in the + // needUpdateStateOnResume bit to true which results in a picker update once + // resumeStateUpdates() is called. 
+} + +func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { + newConfig, ok := s.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + b.logger.Infof("Update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + + b.stateAggregator.pauseStateUpdates() + defer b.stateAggregator.resumeStateUpdates() + return b.updateChildren(s, newConfig) +} + +func (b *bal) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *bal) Close() { + b.stateAggregator.close() + b.bg.Close() + b.logger.Infof("Shutdown") +} + +func (b *bal) ExitIdle() { + b.bg.ExitIdle() +} + +const prefix = "[xds-cluster-manager-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *bal) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go new file mode 100644 index 000000000..3a5625ff1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type childConfig struct { + // ChildPolicy is the child policy and it's config. + ChildPolicy *internalserviceconfig.BalancerConfig +} + +// lbConfig is the balancer config for xds routing policy. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + Children map[string]childConfig +} + +func parseConfig(c json.RawMessage) (*lbConfig, error) { + cfg := &lbConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go new file mode 100644 index 000000000..015cd2b7a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// pickerGroup contains a list of pickers. If the picker isn't ready, the pick +// will be queued. +type pickerGroup struct { + pickers map[string]balancer.Picker +} + +func newPickerGroup(idToPickerState map[string]*subBalancerState) *pickerGroup { + pickers := make(map[string]balancer.Picker) + for id, st := range idToPickerState { + pickers[id] = st.state.Picker + } + return &pickerGroup{ + pickers: pickers, + } +} + +func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + cluster := getPickedCluster(info.Ctx) + if p := pg.pickers[cluster]; p != nil { + return p.Pick(info) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "unknown cluster selected for RPC: %q", cluster) +} + +type clusterKey struct{} + +func getPickedCluster(ctx context.Context) string { + cluster, _ := ctx.Value(clusterKey{}).(string) + return cluster +} + +// GetPickedClusterForTesting returns the cluster in the context; to be used +// for testing only. +func GetPickedClusterForTesting(ctx context.Context) string { + return getPickedCluster(ctx) +} + +// SetPickedCluster adds the selected cluster to the context for the +// xds_cluster_manager LB policy to pick. +func SetPickedCluster(ctx context.Context, cluster string) context.Context { + return context.WithValue(ctx, clusterKey{}, cluster) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go new file mode 100644 index 000000000..749945059 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -0,0 +1,400 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterresolver contains the implementation of the +// cluster_resolver_experimental LB policy which resolves endpoint addresses +// using a list of one or more discovery mechanisms. 
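picker.go above routes each RPC to a child picker by reading a cluster name that was stored on the RPC's context. A standalone restatement of that context-key pattern (unexported key type, typed read with a zero-value fallback); the function names mirror the ones above, but this snippet is only a sketch:

package main

import (
	"context"
	"fmt"
)

// clusterKey is unexported, so only this package can read or write the value.
type clusterKey struct{}

func setPickedCluster(ctx context.Context, cluster string) context.Context {
	return context.WithValue(ctx, clusterKey{}, cluster)
}

func getPickedCluster(ctx context.Context) string {
	// The type assertion yields "" when nothing was stored on the context.
	cluster, _ := ctx.Value(clusterKey{}).(string)
	return cluster
}

func main() {
	ctx := setPickedCluster(context.Background(), "cluster_a")
	fmt.Println(getPickedCluster(ctx))                  // cluster_a
	fmt.Println(getPickedCluster(context.Background())) // empty: no cluster was selected
}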
+package clusterresolver + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/nop" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Name is the name of the cluster_resolver balancer. +const Name = "cluster_resolver_experimental" + +var ( + errBalancerClosed = errors.New("cdsBalancer is closed") + newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { + return bb.Build(cc, o) + } +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +// Build helps implement the balancer.Builder interface. +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityBuilder := balancer.Get(priority.Name) + if priorityBuilder == nil { + logger.Errorf("%q LB policy is needed but not registered", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) + } + priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) + if !ok { + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) + } + + b := &clusterResolverBalancer{ + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + + priorityBuilder: priorityBuilder, + priorityConfigParser: priorityConfigParser, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + b.resourceWatcher = newResourceResolver(b, b.logger) + b.cc = &ccWrapper{ + ClientConn: cc, + b: b, + resourceWatcher: b.resourceWatcher, + } + + go b.run() + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) + } + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. + return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) + } + + var cfg *LBConfig + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) + } + + for i, dm := range cfg.DiscoveryMechanisms { + lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) + if err != nil { + return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) + } + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier Detection + // builder pulled from gRPC LB Registry. 
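ParseConfig above resolves the outlier detection policy from the global LB registry and asserts its ConfigParser interface before handing it the raw JSON. A hedged, self-contained sketch of that registry lookup using the public balancer API; the policy names and nil config are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
)

// parseChildConfig looks up a registered LB policy and, if it knows how to
// parse configuration, validates the raw JSON meant for it.
func parseChildConfig(policy string, cfg json.RawMessage) error {
	b := balancer.Get(policy)
	if b == nil {
		return fmt.Errorf("LB policy %q not registered", policy)
	}
	parser, ok := b.(balancer.ConfigParser)
	if !ok {
		// Policies that take no configuration do not implement ConfigParser.
		return nil
	}
	_, err := parser.ParseConfig(cfg)
	return err
}

func main() {
	fmt.Println(parseChildConfig("round_robin", nil))
	fmt.Println(parseChildConfig("no_such_policy", nil))
}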
+ return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg + } + if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. + return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err) + } + return cfg, nil +} + +// ccUpdate wraps a clientConn update received from gRPC. +type ccUpdate struct { + state balancer.ClientConnState + err error +} + +type exitIdle struct{} + +// clusterResolverBalancer resolves endpoint addresses using a list of one or +// more discovery mechanisms. +type clusterResolverBalancer struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. + resourceWatcher *resourceResolver + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + + priorityBuilder balancer.Builder + priorityConfigParser balancer.ConfigParser + + config *LBConfig + configRaw *serviceconfig.ParseResult + xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. + attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. + + child balancer.Balancer + priorities []priorityConfig + watchUpdateReceived bool +} + +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. +// +// A good update results in creation of endpoint resolvers for the configured +// discovery mechanisms. An update with an error results in cancellation of any +// existing endpoint resolution and propagation of the same to the child policy. +func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, true) + return + } + + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + cfg, _ := update.state.BalancerConfig.(*LBConfig) + if cfg == nil { + b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) + return + } + + b.config = cfg + b.configRaw = update.state.ResolverState.ServiceConfig + b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + + // The child policy is created only after all configured discovery + // mechanisms have been successfully returned endpoints. If that is not the + // case, we return early. + if !b.watchUpdateReceived { + return + } + b.updateChildConfig() +} + +// handleResourceUpdate handles a resource update or error from the resource +// resolver by propagating the same to the child LB policy. +func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { + b.watchUpdateReceived = true + b.priorities = update.priorities + + // An update from the resource resolver contains resolved endpoint addresses + // for all configured discovery mechanisms ordered by priority. This is used + // to generate configuration for the priority LB policy. 
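The struct and handlers above follow a common gRPC balancer layout: callers push work onto an update channel and a single run goroutine consumes it, so the handlers themselves never need a mutex. A generic sketch of that loop under simplified assumptions (plain channels instead of the internal buffer.Unbounded, illustrative update type):

package main

import "fmt"

type update struct{ what string }

// run handles every update on one goroutine until told to stop, mirroring the
// overall structure of clusterResolverBalancer.run.
func run(updates <-chan update, stop <-chan struct{}) {
	for {
		select {
		case u := <-updates:
			fmt.Println("handling:", u.what)
		case <-stop:
			fmt.Println("shutting down")
			return
		}
	}
}

func main() {
	updates := make(chan update) // unbuffered: the send below returns once the loop has the update
	stop := make(chan struct{})
	finished := make(chan struct{})
	go func() { run(updates, stop); close(finished) }()

	updates <- update{what: "client conn update"}
	close(stop)
	<-finished // Close() in the real balancer likewise waits for the loop to exit
}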
+ b.updateChildConfig() + + if update.onDone != nil { + update.onDone() + } +} + +// updateChildConfig builds child policy configuration using endpoint addresses +// returned by the resource resolver and child policy configuration provided by +// parent LB policy. +// +// A child policy is created if one doesn't already exist. The newly built +// configuration is then pushed to the child policy. +func (b *clusterResolverBalancer) updateChildConfig() { + if b.child == nil { + b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) + } + + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) + if err != nil { + b.logger.Warningf("Failed to build child policy config: %v", err) + return + } + childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) + if err != nil { + b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err) + return + } + if b.logger.V(2) { + b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) + } + + endpoints := make([]resolver.Endpoint, len(addrs)) + for i, a := range addrs { + endpoints[i].Attributes = a.BalancerAttributes + endpoints[i].Addresses = []resolver.Address{a} + endpoints[i].Addresses[0].BalancerAttributes = nil + } + if err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: endpoints, + Addresses: addrs, + ServiceConfig: b.configRaw, + Attributes: b.attrsWithClient, + }, + BalancerConfig: childCfg, + }); err != nil { + b.logger.Warningf("Failed to push config to child policy: %v", err) + } +} + +// handleErrorFromUpdate handles errors from the parent LB policy and endpoint +// resolvers. fromParent is true if error is from the parent LB policy. In both +// cases, the error is propagated to the child policy, if one exists. +func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { + b.logger.Warningf("Received error: %v", err) + + // A resource-not-found error from the parent LB policy means that the LDS + // or CDS resource was removed. This should result in endpoint resolvers + // being stopped here. + // + // A resource-not-found error from the EDS endpoint resolver means that the + // EDS resource was removed. No action needs to be taken for this, and we + // should continue watching the same EDS resource. + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.resourceWatcher.stop(false) + } + + if b.child != nil { + b.child.ResolverError(err) + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +// run is a long-running goroutine that handles updates from gRPC and endpoint +// resolvers. The methods handling the individual updates simply push them onto +// a channel which is read and acted upon from here. +func (b *clusterResolverBalancer) run() { + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case *ccUpdate: + b.handleClientConnUpdate(update) + case exitIdle: + if b.child == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. 
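updateChildConfig above converts the flat, attribute-carrying address list produced by the config builder into one single-address endpoint per address, promoting BalancerAttributes from the address to the endpoint. A self-contained sketch of just that conversion, using the public resolver types; the sample addresses are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// toEndpoints mirrors the loop in updateChildConfig: each address becomes a
// one-address endpoint, and its balancer attributes move to the endpoint.
func toEndpoints(addrs []resolver.Address) []resolver.Endpoint {
	endpoints := make([]resolver.Endpoint, len(addrs))
	for i, a := range addrs {
		endpoints[i].Attributes = a.BalancerAttributes
		endpoints[i].Addresses = []resolver.Address{a}
		endpoints[i].Addresses[0].BalancerAttributes = nil
	}
	return endpoints
}

func main() {
	eps := toEndpoints([]resolver.Address{{Addr: "10.0.0.1:80"}, {Addr: "10.0.0.2:80"}})
	fmt.Println(len(eps), eps[0].Addresses[0].Addr) // 2 10.0.0.1:80
}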
+ if ei, ok := b.child.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + } + case u := <-b.resourceWatcher.updateChannel: + b.handleResourceUpdate(u) + + // Close results in stopping the endpoint resolvers and closing the + // underlying child policy and is the only way to exit this goroutine. + case <-b.closed.Done(): + b.resourceWatcher.stop(true) + + if b.child != nil { + b.child.Close() + b.child = nil + } + b.updateCh.Close() + // This is the *ONLY* point of return from this function. + b.logger.Infof("Shutdown") + b.done.Fire() + return + } + } +} + +// Following are methods to implement the balancer interface. + +func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("Received update from gRPC {%+v} after close", state) + return errBalancerClosed + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + b.attrsWithClient = state.ResolverState.Attributes + } + + b.updateCh.Put(&ccUpdate{state: state}) + return nil +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *clusterResolverBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("Received resolver error {%v} after close", err) + return + } + b.updateCh.Put(&ccUpdate{err: err}) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// Close closes the cdsBalancer and the underlying child balancer. +func (b *clusterResolverBalancer) Close() { + b.closed.Fire() + <-b.done.Done() +} + +func (b *clusterResolverBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + +// ccWrapper overrides ResolveNow(), so that re-resolution from the child +// policies will trigger the DNS resolver in cluster_resolver balancer. It +// also intercepts NewSubConn calls in case children don't set the +// StateListener, to allow redirection to happen via this cluster_resolver +// balancer. +type ccWrapper struct { + balancer.ClientConn + b *clusterResolverBalancer + resourceWatcher *resourceResolver +} + +func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { + c.resourceWatcher.resolveNow() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go new file mode 100644 index 000000000..7614b0fc5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package clusterresolver + +import ( + "bytes" + "encoding/json" + "fmt" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" +) + +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. +func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. +func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` + // TelemetryLabels are the telemetry labels associated with this cluster. + TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` + outlierDetection outlierdetection.LBConfig +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. 
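DiscoveryMechanismType above marshals to and from the strings "EDS" and "LOGICAL_DNS" rather than integers; defining MarshalJSON on the value receiver is what makes plain (non-pointer) values marshal as strings. A self-contained sketch of the same round trip with a hypothetical enum type, not the real one:

package main

import (
	"encoding/json"
	"fmt"
)

type mechanismType int

const (
	mechanismEDS mechanismType = iota
	mechanismLogicalDNS
)

// MarshalJSON is on the value receiver so both values and pointers of this
// type marshal as quoted strings.
func (t mechanismType) MarshalJSON() ([]byte, error) {
	switch t {
	case mechanismEDS:
		return []byte(`"EDS"`), nil
	case mechanismLogicalDNS:
		return []byte(`"LOGICAL_DNS"`), nil
	}
	return nil, fmt.Errorf("unknown mechanismType %d", int(t))
}

// UnmarshalJSON accepts only the two known strings and rejects anything else.
func (t *mechanismType) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	switch s {
	case "EDS":
		*t = mechanismEDS
	case "LOGICAL_DNS":
		*t = mechanismLogicalDNS
	default:
		return fmt.Errorf("unknown mechanism type %q", s)
	}
	return nil
}

func main() {
	out, _ := json.Marshal(mechanismLogicalDNS)
	fmt.Println(string(out)) // "LOGICAL_DNS"

	var t mechanismType
	fmt.Println(json.Unmarshal([]byte(`"EDS"`), &t), t == mechanismEDS) // <nil> true
}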
+func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + od := &dm.outlierDetection + switch { + case dm.Cluster != b.Cluster: + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + case !od.EqualIgnoringChildPolicy(&b.outlierDetection): + return false + } + + if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { + return true + } + if (dm.LoadReportingServer != nil) != (b.LoadReportingServer != nil) { + return false + } + return dm.LoadReportingServer.String() == b.LoadReportingServer.String() +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// LBConfig is the config for cluster resolver balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // DiscoveryMechanisms is an ordered list of discovery mechanisms. + // + // Must have at least one element. Results from each discovery mechanism are + // concatenated together in successive priorities. + DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` + + // XDSLBPolicy specifies the policy for locality picking and endpoint picking. + XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` + xdsLBPolicy internalserviceconfig.BalancerConfig +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go new file mode 100644 index 000000000..8740360ee --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "encoding/json" + "fmt" + "sort" + + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/hierarchy" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const million = 1000000 + +// priorityConfig is config for one priority. For example, if there an EDS and a +// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. +// +// Each priorityConfig corresponds to one discovery mechanism from the LBConfig +// generated by the CDS balancer. 
The CDS balancer resolves the cluster name to +// an ordered list of discovery mechanisms (if the top cluster is an aggregated +// cluster), one for each underlying cluster. +type priorityConfig struct { + mechanism DiscoveryMechanism + // edsResp is set only if type is EDS. + edsResp xdsresource.EndpointsUpdate + // addresses is set only if type is DNS. + addresses []string + // Each discovery mechanism has a name generator so that the child policies + // can reuse names between updates (EDS updates for example). + childNameGen *nameGenerator +} + +// buildPriorityConfigJSON builds balancer config for the passed in +// priorities. +// +// The built tree of balancers (see test for the output struct). +// +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌──────────▼─┐ ┌─▼──────────┐ +// │cluster_impl│ │cluster_impl│ +// └──────┬─────┘ └─────┬──────┘ +// │ │ +// ┌──────▼─────┐ ┌─────▼──────┐ +// │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) +// └────────────┘ └────────────┘ +func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { + pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) + if err != nil { + return nil, nil, fmt.Errorf("failed to build priority config: %v", err) + } + ret, err := json.Marshal(pc) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) + } + return ret, addrs, nil +} + +func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { + var ( + retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} + retAddrs []resolver.Address + ) + for _, p := range priorities { + switch p.mechanism.Type { + case DiscoveryMechanismTypeEDS: + names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) + if err != nil { + return nil, nil, err + } + retConfig.Priorities = append(retConfig.Priorities, names...) + retAddrs = append(retAddrs, addrs...) + odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) + for n, c := range odCfgs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + } + continue + case DiscoveryMechanismTypeLogicalDNS: + name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) + retConfig.Priorities = append(retConfig.Priorities, name) + retAddrs = append(retAddrs, addrs...) + odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. 
+ IgnoreReresolutionRequests: false, + } + continue + } + } + return retConfig, retAddrs, nil +} + +func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { + odCfgs := make(map[string]*outlierdetection.LBConfig, len(ciCfgs)) + for n, c := range ciCfgs { + odCfgs[n] = makeClusterImplOutlierDetectionChild(c, odCfg) + } + return odCfgs +} + +func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) *outlierdetection.LBConfig { + odCfgRet := odCfg + odCfgRet.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: ciCfg} + return &odCfgRet +} + +func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { + // Endpoint picking policy for DNS is hardcoded to pick_first. + const childPolicy = "pick_first" + retAddrs := make([]resolver.Address, 0, len(addrStrs)) + pName := fmt.Sprintf("priority-%v", g.prefix) + for _, addrStr := range addrStrs { + retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + } + return pName, &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + TelemetryLabels: mechanism.TelemetryLabels, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, + }, retAddrs +} + +// buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for +// each priority, sorted by priority, and the addresses for each priority (with +// hierarchy attributes set). +// +// For example, if there are two priorities, the returned values will be +// - ["p0", "p1"] +// - map{"p0":p0_config, "p1":p1_config} +// - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] +// - p0 addresses' hierarchy attributes are set to p0 +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { + drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) + for _, d := range edsResp.Drops { + drops = append(drops, clusterimpl.DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + + // Localities of length 0 is triggered by an NACK or resource-not-found + // error before update, or a empty localities list in a update. In either + // case want to create a priority, and send down empty address list, causing + // TF for that priority. "If any discovery mechanism instance experiences an + // error retrieving data, and it has not previously reported any results, it + // should report a result that is a single priority with no endpoints." - + // A37 + priorities := [][]xdsresource.Locality{{}} + if len(edsResp.Localities) != 0 { + priorities = groupLocalitiesByPriority(edsResp.Localities) + } + retNames := g.generate(priorities) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) + var retAddrs []resolver.Address + for i, pName := range retNames { + priorityLocalities := priorities[i] + cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + if err != nil { + return nil, nil, nil, err + } + retConfigs[pName] = cfg + retAddrs = append(retAddrs, addrs...) + } + return retNames, retConfigs, retAddrs, nil +} + +// groupLocalitiesByPriority returns the localities grouped by priority. 
+// +// The returned list is sorted from higher priority to lower. Each item in the +// list is a group of localities. +// +// For example, for L0-p0, L1-p0, L2-p1, results will be +// - [[L0, L1], [L2]] +func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresource.Locality { + var priorityIntSlice []int + priorities := make(map[int][]xdsresource.Locality) + for _, locality := range localities { + priority := int(locality.Priority) + priorities[priority] = append(priorities[priority], locality) + priorityIntSlice = append(priorityIntSlice, priority) + } + // Sort the priorities based on the int value, deduplicate, and then turn + // the sorted list into a string list. This will be child names, in priority + // order. + sort.Ints(priorityIntSlice) + priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) + ret := make([][]xdsresource.Locality, 0, len(priorityIntSliceDeduped)) + for _, p := range priorityIntSliceDeduped { + ret = append(ret, priorities[p]) + } + return ret +} + +func dedupSortedIntSlice(a []int) []int { + if len(a) == 0 { + return a + } + i, j := 0, 1 + for ; j < len(a); j++ { + if a[i] == a[j] { + continue + } + i++ + if i != j { + a[i] = a[j] + } + } + return a[:i+1] +} + +// priorityLocalitiesToClusterImpl takes a list of localities (with the same +// priority), and generates a cluster impl policy config, and a list of +// addresses with their path hierarchy set to [priority-name, locality-name], so +// priority and the xDS LB Policy know which child policy each address is for. +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { + var addrs []resolver.Address + for _, locality := range localities { + var lw uint32 = 1 + if locality.Weight != 0 { + lw = locality.Weight + } + localityStr, err := locality.ID.ToString() + if err != nil { + localityStr = fmt.Sprintf("%+v", locality.ID) + } + for _, endpoint := range locality.Endpoints { + // Filter out all "unhealthy" endpoints (unknown and healthy are + // both considered to be healthy: + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { + continue + } + addr := resolver.Address{Addr: endpoint.Address} + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + // "To provide the xds_wrr_locality load balancer information about + // locality weights received from EDS, the cluster resolver will + // populate a new locality weight attribute for each address The + // attribute will have the weight (as an integer) of the locality + // the address is part of." 
- A52 + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) + var ew uint32 = 1 + if endpoint.Weight != 0 { + ew = endpoint.Weight + } + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) + addrs = append(addrs, addr) + } + } + return &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + TelemetryLabels: mechanism.TelemetryLabels, + DropCategories: drops, + ChildPolicy: xdsLBPolicy, + }, addrs, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go new file mode 100644 index 000000000..119f4c474 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// nameGenerator generates a child name for a list of priorities (each priority +// is a list of localities). +// +// The purpose of this generator is to reuse names between updates. So the +// struct keeps state between generate() calls, and a later generate() might +// return names returned by the previous call. +type nameGenerator struct { + existingNames map[internal.LocalityID]string + prefix uint64 + nextID uint64 +} + +func newNameGenerator(prefix uint64) *nameGenerator { + return &nameGenerator{prefix: prefix} +} + +// generate returns a list of names for the given list of priorities. +// +// Each priority is a list of localities. The name for the priority is picked as +// - for each locality in this priority, if it exists in the existing names, +// this priority will reuse the name +// - if no reusable name is found for this priority, a new name is generated +// +// For example: +// - update 1: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 2: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) +// - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) +func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { + var ret []string + usedNames := make(map[string]bool) + newNames := make(map[internal.LocalityID]string) + for _, priority := range priorities { + var nameFound string + for _, locality := range priority { + if name, ok := ng.existingNames[locality.ID]; ok { + if !usedNames[name] { + nameFound = name + // Found a name to use. No need to process the remaining + // localities. + break + } + } + } + + if nameFound == "" { + // No appropriate used name is found. Make a new name. 
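priorityLocalitiesToClusterImpl above derives a per-address weight for weighted round robin as the locality weight times the endpoint weight, with a missing (zero) weight on either side defaulting to 1. A minimal sketch of just that arithmetic; the helper name is illustrative:

package main

import "fmt"

// effectiveWeight mirrors the defaulting and multiplication done when setting
// the weightedroundrobin address attribute.
func effectiveWeight(localityWeight, endpointWeight uint32) uint32 {
	lw, ew := localityWeight, endpointWeight
	if lw == 0 {
		lw = 1
	}
	if ew == 0 {
		ew = 1
	}
	return lw * ew
}

func main() {
	fmt.Println(effectiveWeight(0, 0))   // 1
	fmt.Println(effectiveWeight(3, 100)) // 300
}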
+ nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) + ng.nextID++ + } + + ret = append(ret, nameFound) + // All localities in this priority share the same name. Add them all to + // the new map. + for _, l := range priority { + newNames[l.ID] = nameFound + } + usedNames[nameFound] = true + } + ng.existingNames = newNames + return ret +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go new file mode 100644 index 000000000..728f1f709 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-cluster-resolver-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go new file mode 100644 index 000000000..5bc64b863 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -0,0 +1,321 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// resourceUpdate is a combined update from all the resources, in the order of +// priority. For example, it can be {EDS, EDS, DNS}. +type resourceUpdate struct { + // A discovery mechanism would return an empty update when it runs into + // errors, and this would result in the priority LB policy reporting + // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would + // fallback to the next highest priority that is available. + priorities []priorityConfig + // To be invoked once the update is completely processed, or is dropped in + // favor of a newer update. 
+ onDone xdsresource.OnDoneFunc +} + +// topLevelResolver is used by concrete endpointsResolver implementations for +// reporting updates and errors. The `resourceResolver` type implements this +// interface and takes appropriate actions upon receipt of updates and errors +// from underlying concrete resolvers. +type topLevelResolver interface { + // onUpdate is called when a new update is received from the underlying + // endpointsResolver implementation. The onDone callback is to be invoked + // once the update is completely processed, or is dropped in favor of a + // newer update. + onUpdate(onDone xdsresource.OnDoneFunc) +} + +// endpointsResolver wraps the functionality to resolve a given resource name to +// a set of endpoints. The mechanism used by concrete implementations depend on +// the supported discovery mechanism type. +type endpointsResolver interface { + // lastUpdate returns endpoint results from the most recent resolution. + // + // The type of the first return result is dependent on the resolver + // implementation. + // + // The second return result indicates whether the resolver was able to + // successfully resolve the resource name to endpoints. If set to false, the + // first return result is invalid and must not be used. + lastUpdate() (any, bool) + + // resolverNow triggers re-resolution of the resource. + resolveNow() + + // stop stops resolution of the resource. Implementations must not invoke + // any methods on the topLevelResolver interface once `stop()` returns. + stop() +} + +// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so +// that the same resource resolver can be reused (e.g. when there are two +// mechanisms, both for the same EDS resource, but has different circuit +// breaking config. +type discoveryMechanismKey struct { + typ DiscoveryMechanismType + name string +} + +// discoveryMechanismAndResolver is needed to keep the resolver and the +// discovery mechanism together, because resolvers can be shared. And we need +// the mechanism for fields like circuit breaking, LRS etc when generating the +// balancer config. +type discoveryMechanismAndResolver struct { + dm DiscoveryMechanism + r endpointsResolver + + childNameGen *nameGenerator +} + +type resourceResolver struct { + parent *clusterResolverBalancer + logger *grpclog.PrefixLogger + updateChannel chan *resourceUpdate + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // mu protects the slice and map, and content of the resolvers in the slice. + mu sync.Mutex + mechanisms []DiscoveryMechanism + children []discoveryMechanismAndResolver + // childrenMap's value only needs the resolver implementation (type + // discoveryMechanism) and the childNameGen. The other two fields are not + // used. + // + // TODO(cleanup): maybe we can make a new type with just the necessary + // fields, and use it here instead. + childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver + // Each new discovery mechanism needs a child name generator to reuse child + // policy names. But to make sure the names across discover mechanism + // doesn't conflict, we need a seq ID. This ID is incremented for each new + // discover mechanism. 
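The childrenMap above is keyed by {discovery type, resource name}, which is what lets two discovery mechanisms that watch the same resource share one underlying resolver even when their other settings (such as circuit breaking) differ. A small sketch of that keying, with illustrative types and names:

package main

import "fmt"

// mechanismKey is a stand-in for discoveryMechanismKey: comparable, so it can
// be used directly as a map key.
type mechanismKey struct {
	typ  string // "EDS" or "LOGICAL_DNS"
	name string // resource name to watch
}

func main() {
	watchers := map[mechanismKey]string{}

	a := mechanismKey{typ: "EDS", name: "backend-service"}
	watchers[a] = "shared EDS watcher"

	// A second mechanism for the same resource (but, say, a different
	// MaxConcurrentRequests setting) produces an equal key and reuses it.
	b := mechanismKey{typ: "EDS", name: "backend-service"}
	_, reused := watchers[b]
	fmt.Println(reused) // true
}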
+ childNameGeneratorSeqID uint64 +} + +func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { + rr := &resourceResolver{ + parent: parent, + logger: logger, + updateChannel: make(chan *resourceUpdate, 1), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), + } + ctx, cancel := context.WithCancel(context.Background()) + rr.serializer = grpcsync.NewCallbackSerializer(ctx) + rr.serializerCancel = cancel + return rr +} + +func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + bb := b[i] + if !aa.Equal(bb) { + return false + } + } + return true +} + +func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { + switch dm.Type { + case DiscoveryMechanismTypeEDS: + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + case DiscoveryMechanismTypeLogicalDNS: + return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + default: + return discoveryMechanismKey{} + } +} + +func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { + rr.mu.Lock() + defer rr.mu.Unlock() + if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { + return + } + rr.mechanisms = mechanisms + rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) + newDMs := make(map[discoveryMechanismKey]bool) + + // Start one watch for each new discover mechanism {type+resource_name}. + for i, dm := range mechanisms { + dmKey := discoveryMechanismToKey(dm) + newDMs[dmKey] = true + dmAndResolver, ok := rr.childrenMap[dmKey] + if ok { + // If this is not new, keep the fields (especially childNameGen), + // and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not copied + // to dmKey, we need to keep those updated. + dmAndResolver.dm = dm + rr.children[i] = dmAndResolver + continue + } + + // Create resolver for a newly seen resource. + var resolver endpointsResolver + switch dm.Type { + case DiscoveryMechanismTypeEDS: + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) + case DiscoveryMechanismTypeLogicalDNS: + resolver = newDNSResolver(dmKey.name, rr, rr.logger) + } + dmAndResolver = discoveryMechanismAndResolver{ + dm: dm, + r: resolver, + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } + rr.childrenMap[dmKey] = dmAndResolver + rr.children[i] = dmAndResolver + rr.childNameGeneratorSeqID++ + } + + // Stop the resources that were removed. + for dm, r := range rr.childrenMap { + if !newDMs[dm] { + delete(rr.childrenMap, dm) + go r.r.stop() + } + } + // Regenerate even if there's no change in discovery mechanism, in case + // priority order changed. + rr.generateLocked(func() {}) +} + +// resolveNow is typically called to trigger re-resolve of DNS. The EDS +// resolveNow() is a noop. +func (rr *resourceResolver) resolveNow() { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, r := range rr.childrenMap { + r.r.resolveNow() + } +} + +func (rr *resourceResolver) stop(closing bool) { + rr.mu.Lock() + + // Save the previous childrenMap to stop the children outside the mutex, + // and reinitialize the map. We only need to reinitialize to allow for the + // policy to be reused if the resource comes back. 
In practice, this does + // not happen as the parent LB policy will also be closed, causing this to + // be removed entirely, but a future use case might want to reuse the + // policy instead. + cm := rr.childrenMap + rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) + rr.mechanisms = nil + rr.children = nil + + rr.mu.Unlock() + + for _, r := range cm { + r.r.stop() + } + + if closing { + rr.serializerCancel() + <-rr.serializer.Done() + } + + // stop() is called when the LB policy is closed or when the underlying + // cluster resource is removed by the management server. In the latter case, + // an empty config update needs to be pushed to the child policy to ensure + // that a picker that fails RPCs is sent up to the channel. + // + // Resource resolver implementations are expected to not send any updates + // after they are stopped. Therefore, we don't have to worry about another + // write to this channel happening at the same time as this one. + select { + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } + default: + } + rr.updateChannel <- &resourceUpdate{} +} + +// generateLocked collects updates from all resolvers. It pushes the combined +// result on the update channel if all child resolvers have received at least +// one update. Otherwise it returns early. +// +// The onDone callback is invoked inline if not all child resolvers have +// received at least one update. If all child resolvers have received at least +// one update, onDone is invoked when the combined update is processed by the +// clusterresolver LB policy. +// +// Caller must hold rr.mu. +func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { + var ret []priorityConfig + for _, rDM := range rr.children { + u, ok := rDM.r.lastUpdate() + if !ok { + // Don't send updates to parent until all resolvers have update to + // send. + onDone() + return + } + switch uu := u.(type) { + case xdsresource.EndpointsUpdate: + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) + case []string: + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) + } + } + select { + // A previously unprocessed update is dropped in favor of the new one, and + // the former's onDone callback is invoked to unblock the xDS client's + // receive path. + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } + default: + } + rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} +} + +func (rr *resourceResolver) onUpdate(onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + rr.mu.Lock() + rr.generateLocked(onDone) + rr.mu.Unlock() + } + rr.serializer.ScheduleOr(handleUpdate, func() { onDone() }) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go new file mode 100644 index 000000000..cfc871d3b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -0,0 +1,188 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "fmt" + "net/url" + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + // The dns resolver is registered by the grpc package. So, this call to + // resolver.Get() is never expected to return nil. + return resolver.Get("dns").Build(target, cc, opts) + } +) + +// dnsDiscoveryMechanism watches updates for the given DNS hostname. +// +// It implements resolver.ClientConn interface to work with the DNS resolver. +type dnsDiscoveryMechanism struct { + target string + topLevelResolver topLevelResolver + dnsR resolver.Resolver + logger *grpclog.PrefixLogger + + mu sync.Mutex + addrs []string + updateReceived bool +} + +// newDNSResolver creates an endpoints resolver which uses a DNS resolver under +// the hood. +// +// An error in parsing the provided target string or an error in creating a DNS +// resolver means that we will never be able to resolve the provided target +// strings to endpoints. The topLevelResolver propagates address updates to the +// clusterresolver LB policy **only** after it receives updates from all its +// child resolvers. Therefore, an error here means that the topLevelResolver +// will never send address updates to the clusterresolver LB policy. +// +// Calling the onError() callback will ensure that this error is +// propagated to the child policy which eventually move the channel to +// transient failure. +// +// The `dnsR` field is unset if we run into errors in this function. Therefore, a +// nil check is required wherever we access that field. +func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism { + ret := &dnsDiscoveryMechanism{ + target: target, + topLevelResolver: topLevelResolver, + logger: logger, + } + u, err := url.Parse("dns:///" + target) + if err != nil { + if ret.logger.V(2) { + ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate(func() {}) + return ret + } + + r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{}) + if err != nil { + if ret.logger.V(2) { + ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate(func() {}) + return ret + } + ret.dnsR = r + return ret +} + +func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { + dr.mu.Lock() + defer dr.mu.Unlock() + + if !dr.updateReceived { + return nil, false + } + return dr.addrs, true +} + +func (dr *dnsDiscoveryMechanism) resolveNow() { + if dr.dnsR != nil { + dr.dnsR.ResolveNow(resolver.ResolveNowOptions{}) + } +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. 
The +// underlying dns resolver does not send any updates to the resolver.ClientConn +// interface passed to it (implemented by dnsDiscoveryMechanism in this case) +// after its `Close()` returns. Therefore, we can guarantee that no methods of +// the topLevelResolver are invoked after we return from this method. +func (dr *dnsDiscoveryMechanism) stop() { + if dr.dnsR != nil { + dr.dnsR.Close() + } +} + +// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive +// updates from the real DNS resolver. + +func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) + } + + dr.mu.Lock() + var addrs []string + if len(state.Endpoints) > 0 { + // Assume 1 address per endpoint, which is how DNS is expected to + // behave. The slice will grow as needed, however. + addrs = make([]string, 0, len(state.Endpoints)) + for _, e := range state.Endpoints { + for _, a := range e.Addresses { + addrs = append(addrs, a.Addr) + } + } + } else { + addrs = make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } + } + dr.addrs = addrs + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate(func() {}) + return nil +} + +func (dr *dnsDiscoveryMechanism) ReportError(err error) { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) + } + + dr.mu.Lock() + // If a previous good update was received, suppress the error and continue + // using the previous update. If RPCs were succeeding prior to this, they + // will continue to do so. Also suppress errors if we previously received an + // error, since there will be no downstream effects of propagating this + // error. + if dr.updateReceived { + dr.mu.Unlock() + return + } + dr.addrs = nil + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate(func() {}) +} + +func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { + dr.UpdateState(resolver.State{Addresses: addresses}) +} + +func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { + return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go new file mode 100644 index 000000000..ddb949019 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type edsDiscoveryMechanism struct { + nameToWatch string + cancelWatch func() + topLevelResolver topLevelResolver + stopped *grpcsync.Event + logger *grpclog.PrefixLogger + + mu sync.Mutex + update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. +} + +func (er *edsDiscoveryMechanism) lastUpdate() (any, bool) { + er.mu.Lock() + defer er.mu.Unlock() + + if er.update == nil { + return nil, false + } + return *er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. +func (er *edsDiscoveryMechanism) stop() { + // Canceling a watch with the xDS client can race with an xDS response + // received around the same time, and can result in the watch callback being + // invoked after the watch is canceled. Callers need to handle this race, + // and we fire the stopped event here to ensure that a watch callback + // invocation around the same time becomes a no-op. + er.stopped.Fire() + er.cancelWatch() +} + +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. +func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + nameToWatch: nameToWatch, + topLevelResolver: topLevelResolver, + logger: logger, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) + return ret +} + +// OnUpdate is invoked to report an update for the resource being watched. +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.OnDoneFunc) { + if er.stopped.HasFired() { + onDone() + return + } + + er.mu.Lock() + er.update = &update.Resource + er.mu.Unlock() + + er.topLevelResolver.onUpdate(onDone) +} + +func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.OnDoneFunc) { + if er.stopped.HasFired() { + onDone() + return + } + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported error: %v", er.nameToWatch, err) + } + + er.mu.Lock() + if er.update != nil { + // Continue using a previously received good configuration if one + // exists. + er.mu.Unlock() + onDone() + return + } + + // Else report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. 
+	er.update = &xdsresource.EndpointsUpdate{}
+	er.mu.Unlock()
+
+	er.topLevelResolver.onUpdate(onDone)
+}
+
+func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+	if er.stopped.HasFired() {
+		onDone()
+		return
+	}
+
+	if er.logger.V(2) {
+		er.logger.Infof("EDS discovery mechanism for resource %q reported resource-does-not-exist error", er.nameToWatch)
+	}
+
+	// Report an empty update that would result in no priority child being
+	// created for this discovery mechanism. This would result in the priority
+	// LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or
+	// localities) if this was the only discovery mechanism, or would result in
+	// the priority LB policy using a lower priority discovery mechanism when
+	// that becomes available.
+	er.mu.Lock()
+	er.update = &xdsresource.EndpointsUpdate{}
+	er.mu.Unlock()
+
+	er.topLevelResolver.onUpdate(onDone)
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go
new file mode 100644
index 000000000..f5605df83
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go
@@ -0,0 +1,120 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package loadstore contains the loadStoreWrapper shared by the balancers.
+package loadstore
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+)
+
+// NewWrapper creates a Wrapper.
+func NewWrapper() *Wrapper {
+	return &Wrapper{}
+}
+
+// Wrapper wraps a load store with cluster and edsService.
+//
+// Its store and cluster/edsService can be updated separately. And it will
+// update its internal perCluster store so that new stats will be added to the
+// correct perCluster.
+//
+// Note that this struct is a temporary workaround before we implement graceful
+// switch for EDS. Any update to the clusterName and serviceName is too early;
+// the perfect timing is when the picker is updated with the new connection.
+// This early update could cause picks for the old SubConn to be reported to
+// the new services.
+//
+// When the graceful switch in EDS is done, there should be no need for this
+// struct. The policies that record/report load shouldn't need to handle update
+// of lrsServerName/cluster/edsService. Its parent should do a graceful switch
+// of the whole tree when any of those changes.
+type Wrapper struct {
+	mu         sync.RWMutex
+	cluster    string
+	edsService string
+	// store and perCluster are initialized as nil. They are only set by the
+	// balancer when LRS is enabled. Before that, all functions to record loads
+	// are no-op.
+	store      *load.Store
+	perCluster load.PerClusterReporter
+}
+
+// UpdateClusterAndService updates the cluster name and eds service for this
+// wrapper. If either of them has changed from before, the perCluster store in
+// this wrapper will also be updated.
+func (lsw *Wrapper) UpdateClusterAndService(cluster, edsService string) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if cluster == lsw.cluster && edsService == lsw.edsService { + return + } + lsw.cluster = cluster + lsw.edsService = edsService + lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService) +} + +// UpdateLoadStore updates the load store for this wrapper. If it is changed +// from before, the perCluster store in this wrapper will also be updated. +func (lsw *Wrapper) UpdateLoadStore(store *load.Store) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if store == lsw.store { + return + } + lsw.store = store + lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService) +} + +// CallStarted records a call started in the store. +func (lsw *Wrapper) CallStarted(locality string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallStarted(locality) + } +} + +// CallFinished records a call finished in the store. +func (lsw *Wrapper) CallFinished(locality string, err error) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallFinished(locality, err) + } +} + +// CallServerLoad records the server load in the store. +func (lsw *Wrapper) CallServerLoad(locality, name string, val float64) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallServerLoad(locality, name, val) + } +} + +// CallDropped records a call dropped in the store. +func (lsw *Wrapper) CallDropped(category string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallDropped(category) + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go new file mode 100644 index 000000000..53ba72c08 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -0,0 +1,918 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package outlierdetection provides an implementation of the outlier detection +// LB policy, as defined in +// https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md. +package outlierdetection + +import ( + "encoding/json" + "fmt" + "math" + "math/rand" + "strings" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Globals to stub out in tests. 
+var ( + afterFunc = time.AfterFunc + now = time.Now +) + +// Name is the name of the outlier detection balancer. +const Name = "outlier_detection_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &outlierDetectionBalancer{ + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + channelzParent: bOpts.ChannelzParent, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.child = gracefulswitch.NewBalancer(b, bOpts) + go b.run() + return b +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &LBConfig{ + // Default top layer values as documented in A50. + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } + + // This unmarshalling handles underlying layers sre and fpe which have their + // own defaults for their fields if either sre or fpe are present. + if err := json.Unmarshal(s, lbCfg); err != nil { // Validates child config if present as well. + return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) + } + + // Note: in the xds flow, these validations will never fail. The xdsclient + // performs the same validations as here on the xds Outlier Detection + // resource before parsing resource into JSON which this function gets + // called with. A50 defines two separate places for these validations to + // take place, the xdsclient and this ParseConfig method. "When parsing a + // config from JSON, if any of these requirements is violated, that should + // be treated as a parsing error." - A50 + switch { + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + // Approximately 290 years is the maximum time that time.Duration (int64) + // can represent. The restrictions on the protobuf.Duration field are to be + // within +-10000 years. Thus, just check for negative values. + case lbCfg.Interval < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.interval = %s; must be >= 0", lbCfg.Interval) + case lbCfg.BaseEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) + case lbCfg.MaxEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + + // "The fields max_ejection_percent, + // success_rate_ejection.enforcement_percentage, + // failure_percentage_ejection.threshold, and + // failure_percentage.enforcement_percentage must have values less than or + // equal to 100." 
- A50 + case lbCfg.MaxEjectionPercent > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_percent = %v; must be <= 100", lbCfg.MaxEjectionPercent) + case lbCfg.SuccessRateEjection != nil && lbCfg.SuccessRateEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = %v; must be <= 100", lbCfg.SuccessRateEjection.EnforcementPercentage) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.Threshold > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) + } + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// scUpdate wraps a subConn update to be sent to the child balancer. +type scUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + +type ejectionUpdate struct { + scw *subConnWrapper + isEjected bool // true for ejected, false for unejected +} + +type lbCfgUpdate struct { + lbCfg *LBConfig + // to make sure picker is updated synchronously. + done chan struct{} +} + +type outlierDetectionBalancer struct { + // These fields are safe to be accessed without holding any mutex because + // they are synchronized in run(), which makes these field accesses happen + // serially. + // + // childState is the latest balancer state received from the child. + childState balancer.State + // recentPickerNoop represents whether the most recent picker sent upward to + // the balancer.ClientConn is a noop picker, which doesn't count RPC's. Used + // to suppress redundant picker updates. + recentPickerNoop bool + + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + channelzParent channelz.Identifier + + // childMu guards calls into child (to uphold the balancer.Balancer API + // guarantee of synchronous calls). + childMu sync.Mutex + child *gracefulswitch.Balancer + + // mu guards access to the following fields. It also helps to synchronize + // behaviors of the following events: config updates, firing of the interval + // timer, SubConn State updates, SubConn address updates, and child state + // updates. + // + // For example, when we receive a config update in the middle of the + // interval timer algorithm, which uses knobs present in the config, the + // balancer will wait for the interval timer algorithm to finish before + // persisting the new configuration. + // + // Another example would be the updating of the addrs map, such as from a + // SubConn address update in the middle of the interval timer algorithm + // which uses addrs. This balancer waits for the interval timer algorithm to + // finish before making the update to the addrs map. + // + // This mutex is never held at the same time as childMu (within the context + // of a single goroutine). 
+ mu sync.Mutex + addrs map[string]*addressInfo + cfg *LBConfig + scWrappers map[balancer.SubConn]*subConnWrapper + timerStartTime time.Time + intervalTimer *time.Timer + inhibitPickerUpdates bool + updateUnconditionally bool + numAddrsEjected int // For fast calculations of percentage of addrs ejected + + scUpdateCh *buffer.Unbounded + pickerUpdateCh *buffer.Unbounded +} + +// noopConfig returns whether this balancer is configured with a logical no-op +// configuration or not. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) noopConfig() bool { + return b.cfg.SuccessRateEjection == nil && b.cfg.FailurePercentageEjection == nil +} + +// onIntervalConfig handles logic required specifically on the receipt of a +// configuration which specifies to count RPC's and periodically perform passive +// health checking based on heuristics defined in configuration every configured +// interval. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onIntervalConfig() { + var interval time.Duration + if b.timerStartTime.IsZero() { + b.timerStartTime = time.Now() + for _, addrInfo := range b.addrs { + addrInfo.callCounter.clear() + } + interval = time.Duration(b.cfg.Interval) + } else { + interval = time.Duration(b.cfg.Interval) - now().Sub(b.timerStartTime) + if interval < 0 { + interval = 0 + } + } + b.intervalTimer = afterFunc(interval, b.intervalTimerAlgorithm) +} + +// onNoopConfig handles logic required specifically on the receipt of a +// configuration which specifies the balancer to be a noop. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onNoopConfig() { + // "If a config is provided with both the `success_rate_ejection` and + // `failure_percentage_ejection` fields unset, skip starting the timer and + // do the following:" + // "Unset the timer start timestamp." + b.timerStartTime = time.Time{} + for _, addrInfo := range b.addrs { + // "Uneject all currently ejected addresses." + if !addrInfo.latestEjectionTimestamp.IsZero() { + b.unejectAddress(addrInfo) + } + // "Reset each address's ejection time multiplier to 0." + addrInfo.ejectionTimeMultiplier = 0 + } +} + +func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + // Reject whole config if child policy doesn't exist, don't persist it for + // later. + bb := balancer.Get(lbCfg.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("outlier detection: child balancer %q not registered", lbCfg.ChildPolicy.Name) + } + + // It is safe to read b.cfg here without holding the mutex, as the only + // write to b.cfg happens later in this function. This function is part of + // the balancer.Balancer API, so it is guaranteed to be called in a + // synchronous manner, so it cannot race with this read. + if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { + b.childMu.Lock() + err := b.child.SwitchTo(bb) + if err != nil { + b.childMu.Unlock() + return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) + } + b.childMu.Unlock() + } + + b.mu.Lock() + // Inhibit child picker updates until this UpdateClientConnState() call + // completes. 
If needed, a picker update containing the no-op config bit + // determined from this config and most recent state from the child will be + // sent synchronously upward at the end of this UpdateClientConnState() + // call. + b.inhibitPickerUpdates = true + b.updateUnconditionally = false + b.cfg = lbCfg + + addrs := make(map[string]bool, len(s.ResolverState.Addresses)) + for _, addr := range s.ResolverState.Addresses { + addrs[addr.Addr] = true + if _, ok := b.addrs[addr.Addr]; !ok { + b.addrs[addr.Addr] = newAddressInfo() + } + } + for addr := range b.addrs { + if !addrs[addr] { + delete(b.addrs, addr) + } + } + + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + + if b.noopConfig() { + b.onNoopConfig() + } else { + b.onIntervalConfig() + } + b.mu.Unlock() + + b.childMu.Lock() + err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.cfg.ChildPolicy.Config, + }) + b.childMu.Unlock() + + done := make(chan struct{}) + b.pickerUpdateCh.Put(lbCfgUpdate{ + lbCfg: lbCfg, + done: done, + }) + <-done + + return err +} + +func (b *outlierDetectionBalancer) ResolverError(err error) { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ResolverError(err) +} + +func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + scw, ok := b.scWrappers[sc] + if !ok { + // Shouldn't happen if passed down a SubConnWrapper to child on SubConn + // creation. + b.logger.Errorf("UpdateSubConnState called with SubConn that has no corresponding SubConnWrapper") + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(b.scWrappers, scw.SubConn) + } + b.scUpdateCh.Put(&scUpdate{ + scw: scw, + state: state, + }) +} + +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *outlierDetectionBalancer) Close() { + b.closed.Fire() + <-b.done.Done() + b.childMu.Lock() + b.child.Close() + b.childMu.Unlock() + + b.scUpdateCh.Close() + b.pickerUpdateCh.Close() + + b.mu.Lock() + defer b.mu.Unlock() + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } +} + +func (b *outlierDetectionBalancer) ExitIdle() { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ExitIdle() +} + +// wrappedPicker delegates to the child policy's picker, and when the request +// finishes, it increments the corresponding counter in the map entry referenced +// by the subConnWrapper that was picked. If both the `success_rate_ejection` +// and `failure_percentage_ejection` fields are unset in the configuration, this +// picker will not count. +type wrappedPicker struct { + childPicker balancer.Picker + noopPicker bool +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := wp.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + done := func(di balancer.DoneInfo) { + if !wp.noopPicker { + incrementCounter(pr.SubConn, di) + } + if pr.Done != nil { + pr.Done(di) + } + } + scw, ok := pr.SubConn.(*subConnWrapper) + if !ok { + // This can never happen, but check is present for defensive + // programming. 
+ logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") + return balancer.PickResult{ + SubConn: pr.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil + } + return balancer.PickResult{ + SubConn: scw.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil +} + +func incrementCounter(sc balancer.SubConn, info balancer.DoneInfo) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Shouldn't happen, as comes from child + return + } + + // scw.addressInfo and callCounter.activeBucket can be written to + // concurrently (the pointers themselves). Thus, protect the reads here with + // atomics to prevent data corruption. There exists a race in which you read + // the addressInfo or active bucket pointer and then that pointer points to + // deprecated memory. If this goroutine yields the processor, in between + // reading the addressInfo pointer and writing to the active bucket, + // UpdateAddresses can switch the addressInfo the scw points to. Writing to + // an outdated addresses is a very small race and tolerable. After reading + // callCounter.activeBucket in this picker a swap call can concurrently + // change what activeBucket points to. A50 says to swap the pointer, which + // will cause this race to write to deprecated memory the interval timer + // algorithm will never read, which makes this race alright. + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + ab := (*bucket)(atomic.LoadPointer(&addrInfo.callCounter.activeBucket)) + + if info.Err == nil { + atomic.AddUint32(&ab.numSuccesses, 1) + } else { + atomic.AddUint32(&ab.numFailures, 1) + } +} + +func (b *outlierDetectionBalancer) UpdateState(s balancer.State) { + b.pickerUpdateCh.Put(s) +} + +func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state) } + sc, err := b.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + scw := &subConnWrapper{ + SubConn: sc, + addresses: addrs, + scUpdateCh: b.scUpdateCh, + listener: oldListener, + } + b.mu.Lock() + defer b.mu.Unlock() + b.scWrappers[sc] = scw + if len(addrs) != 1 { + return scw, nil + } + addrInfo, ok := b.addrs[addrs[0].Addr] + if !ok { + return scw, nil + } + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + if !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + return scw, nil +} + +func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) { + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +// appendIfPresent appends the scw to the address, if the address is present in +// the Outlier Detection balancers address map. Returns nil if not present, and +// the map entry if present. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) appendIfPresent(addr string, scw *subConnWrapper) *addressInfo { + addrInfo, ok := b.addrs[addr] + if !ok { + return nil + } + + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + return addrInfo +} + +// removeSubConnFromAddressesMapEntry removes the scw from its map entry if +// present. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) removeSubConnFromAddressesMapEntry(scw *subConnWrapper) { + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + for i, sw := range addrInfo.sws { + if scw == sw { + addrInfo.sws = append(addrInfo.sws[:i], addrInfo.sws[i+1:]...) + return + } + } +} + +func (b *outlierDetectionBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Return, shouldn't happen if passed up scw + return + } + + b.cc.UpdateAddresses(scw.SubConn, addrs) + b.mu.Lock() + defer b.mu.Unlock() + + // Note that 0 addresses is a valid update/state for a SubConn to be in. + // This is correctly handled by this algorithm (handled as part of a non singular + // old address/new address). + switch { + case len(scw.addresses) == 1 && len(addrs) == 1: // single address to single address + // If the updated address is the same, then there is nothing to do + // past this point. + if scw.addresses[0].Addr == addrs[0].Addr { + return + } + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo == nil { // uneject unconditionally because could have come from an ejected address + scw.uneject() + break + } + if addrInfo.latestEjectionTimestamp.IsZero() { // relay new updated subconn state + scw.uneject() + } else { + scw.eject() + } + case len(scw.addresses) == 1: // single address to multiple/no addresses + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo != nil { + addrInfo.callCounter.clear() + } + scw.uneject() + case len(addrs) == 1: // multiple/no addresses to single address + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo != nil && !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + } // otherwise multiple/no addresses to multiple/no addresses; ignore + + scw.addresses = addrs +} + +func (b *outlierDetectionBalancer) ResolveNow(opts resolver.ResolveNowOptions) { + b.cc.ResolveNow(opts) +} + +func (b *outlierDetectionBalancer) Target() string { + return b.cc.Target() +} + +// handleSubConnUpdate stores the recent state and forward the update +// if the SubConn is not ejected. +func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { + scw := u.scw + scw.latestState = u.state + if !scw.ejected { + if scw.listener != nil { + b.childMu.Lock() + scw.listener(u.state) + b.childMu.Unlock() + } + } +} + +// handleEjectedUpdate handles any SubConns that get ejected/unejected, and +// forwards the appropriate corresponding subConnState to the child policy. +func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { + scw := u.scw + scw.ejected = u.isEjected + // If scw.latestState has never been written to will default to connectivity + // IDLE, which is fine. + stateToUpdate := scw.latestState + if u.isEjected { + stateToUpdate = balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + } + } + if scw.listener != nil { + b.childMu.Lock() + scw.listener(stateToUpdate) + b.childMu.Unlock() + } +} + +// handleChildStateUpdate forwards the picker update wrapped in a wrapped picker +// with the noop picker bit present. 
+func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) {
+	b.childState = u
+	b.mu.Lock()
+	if b.inhibitPickerUpdates {
+		// If a child's state is updated during the suppression of child
+		// updates, the synchronous handleLBConfigUpdate function with respect
+		// to UpdateClientConnState should return a picker unconditionally.
+		b.updateUnconditionally = true
+		b.mu.Unlock()
+		return
+	}
+	noopCfg := b.noopConfig()
+	b.mu.Unlock()
+	b.recentPickerNoop = noopCfg
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.childState.ConnectivityState,
+		Picker: &wrappedPicker{
+			childPicker: b.childState.Picker,
+			noopPicker:  noopCfg,
+		},
+	})
+}
+
+// handleLBConfigUpdate compares whether the new config is a noop config to the
+// noop bit in the picker, if present. It updates the picker if this bit
+// changed compared to the picker currently in use.
+func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) {
+	lbCfg := u.lbCfg
+	noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil
+	// If the child has sent its first update and this config flips the noop
+	// bit compared to the most recent picker update sent upward, then a new
+	// picker with this updated bit needs to be forwarded upward. If a child
+	// update was received during the suppression of child updates within
+	// UpdateClientConnState(), then a new picker needs to be forwarded with
+	// this updated state, regardless of whether this new configuration flips
+	// the bit.
+	if b.childState.Picker != nil && noopCfg != b.recentPickerNoop || b.updateUnconditionally {
+		b.recentPickerNoop = noopCfg
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.childState.ConnectivityState,
+			Picker: &wrappedPicker{
+				childPicker: b.childState.Picker,
+				noopPicker:  noopCfg,
+			},
+		})
+	}
+	b.inhibitPickerUpdates = false
+	b.updateUnconditionally = false
+	close(u.done)
+}
+
+func (b *outlierDetectionBalancer) run() {
+	defer b.done.Fire()
+	for {
+		select {
+		case update, ok := <-b.scUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.scUpdateCh.Load()
+			if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case *scUpdate:
+				b.handleSubConnUpdate(u)
+			case *ejectionUpdate:
+				b.handleEjectedUpdate(u)
+			}
+		case update, ok := <-b.pickerUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.pickerUpdateCh.Load()
+			if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case balancer.State:
+				b.handleChildStateUpdate(u)
+			case lbCfgUpdate:
+				b.handleLBConfigUpdate(u)
+			}
+		case <-b.closed.Done():
+			return
+		}
+	}
+}
+
+// intervalTimerAlgorithm ejects and unejects addresses based on the Outlier
+// Detection configuration and data about each address from the previous
+// interval.
+func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { + b.mu.Lock() + defer b.mu.Unlock() + b.timerStartTime = time.Now() + + for _, addrInfo := range b.addrs { + addrInfo.callCounter.swap() + } + + if b.cfg.SuccessRateEjection != nil { + b.successRateAlgorithm() + } + + if b.cfg.FailurePercentageEjection != nil { + b.failurePercentageAlgorithm() + } + + for _, addrInfo := range b.addrs { + if addrInfo.latestEjectionTimestamp.IsZero() && addrInfo.ejectionTimeMultiplier > 0 { + addrInfo.ejectionTimeMultiplier-- + continue + } + if addrInfo.latestEjectionTimestamp.IsZero() { + // Address is already not ejected, so no need to check for whether + // to uneject the address below. + continue + } + et := time.Duration(b.cfg.BaseEjectionTime) * time.Duration(addrInfo.ejectionTimeMultiplier) + met := max(time.Duration(b.cfg.BaseEjectionTime), time.Duration(b.cfg.MaxEjectionTime)) + uet := addrInfo.latestEjectionTimestamp.Add(min(et, met)) + if now().After(uet) { + b.unejectAddress(addrInfo) + } + } + + // This conditional only for testing (since the interval timer algorithm is + // called manually), will never hit in production. + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + b.intervalTimer = afterFunc(time.Duration(b.cfg.Interval), b.intervalTimerAlgorithm) +} + +// addrsWithAtLeastRequestVolume returns a slice of address information of all +// addresses with at least request volume passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) addrsWithAtLeastRequestVolume(requestVolume uint32) []*addressInfo { + var addrs []*addressInfo + for _, addrInfo := range b.addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + if rv >= requestVolume { + addrs = append(addrs, addrInfo) + } + } + return addrs +} + +// meanAndStdDev returns the mean and std dev of the fractions of successful +// requests of the addresses passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) meanAndStdDev(addrs []*addressInfo) (float64, float64) { + var totalFractionOfSuccessfulRequests float64 + var mean float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + totalFractionOfSuccessfulRequests += float64(bucket.numSuccesses) / float64(rv) + } + mean = totalFractionOfSuccessfulRequests / float64(len(addrs)) + var sumOfSquares float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + devFromMean := (float64(bucket.numSuccesses) / float64(rv)) - mean + sumOfSquares += devFromMean * devFromMean + } + variance := sumOfSquares / float64(len(addrs)) + return mean, math.Sqrt(variance) +} + +// successRateAlgorithm ejects any addresses where the success rate falls below +// the other addresses according to mean and standard deviation, and if overall +// applicable from other set heuristics. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) successRateAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.SuccessRateEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.SuccessRateEjection.MinimumHosts) { + return + } + mean, stddev := b.meanAndStdDev(addrsToConsider) + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.SuccessRateEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) + requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) + if successRate < requiredSuccessRate { + channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// failurePercentageAlgorithm ejects any addresses where the failure percentage +// rate exceeds a set enforcement percentage, if overall applicable from other +// set heuristics. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.FailurePercentageEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.FailurePercentageEjection.MinimumHosts) { + return + } + + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.FailurePercentageEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 + if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected++ + addrInfo.latestEjectionTimestamp = b.timerStartTime + addrInfo.ejectionTimeMultiplier++ + for _, sbw := range addrInfo.sws { + sbw.eject() + channelz.Infof(logger, b.channelzParent, "Subchannel ejected: %s", sbw) + } + +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected-- + addrInfo.latestEjectionTimestamp = time.Time{} + for _, sbw := range addrInfo.sws { + sbw.uneject() + channelz.Infof(logger, b.channelzParent, "Subchannel unejected: %s", sbw) + } +} + +// addressInfo contains the runtime information about an address that pertains +// to Outlier Detection. This struct and all of its fields is protected by +// outlierDetectionBalancer.mu in the case where it is accessed through the +// address map. In the case of Picker callbacks, the writes to the activeBucket +// of callCounter are protected by atomically loading and storing +// unsafe.Pointers (see further explanation in incrementCounter()). +type addressInfo struct { + // The call result counter object. + callCounter *callCounter + + // The latest ejection timestamp, or zero if the address is currently not + // ejected. 
+ latestEjectionTimestamp time.Time + + // The current ejection time multiplier, starting at 0. + ejectionTimeMultiplier int64 + + // A list of subchannel wrapper objects that correspond to this address. + sws []*subConnWrapper +} + +func (a *addressInfo) String() string { + var res strings.Builder + res.WriteString("[") + for _, sw := range a.sws { + res.WriteString(sw.String()) + } + res.WriteString("]") + return res.String() +} + +func newAddressInfo() *addressInfo { + return &addressInfo{ + callCounter: newCallCounter(), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go new file mode 100644 index 000000000..4597f727b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "sync/atomic" + "unsafe" +) + +type bucket struct { + numSuccesses uint32 + numFailures uint32 +} + +func newCallCounter() *callCounter { + return &callCounter{ + activeBucket: unsafe.Pointer(&bucket{}), + inactiveBucket: &bucket{}, + } +} + +// callCounter has two buckets, which each count successful and failing RPC's. +// The activeBucket is used to actively count any finished RPC's, and the +// inactiveBucket is populated with this activeBucket's data every interval for +// use by the Outlier Detection algorithm. +type callCounter struct { + // activeBucket updates every time a call finishes (from picker passed to + // Client Conn), so protect pointer read with atomic load of unsafe.Pointer + // so picker does not have to grab a mutex per RPC, the critical path. + activeBucket unsafe.Pointer // bucket + inactiveBucket *bucket +} + +func (cc *callCounter) clear() { + atomic.StorePointer(&cc.activeBucket, unsafe.Pointer(&bucket{})) + cc.inactiveBucket = &bucket{} +} + +// "When the timer triggers, the inactive bucket is zeroed and swapped with the +// active bucket. Then the inactive bucket contains the number of successes and +// failures since the last time the timer triggered. Those numbers are used to +// evaluate the ejection criteria." - A50. +func (cc *callCounter) swap() { + ib := cc.inactiveBucket + *ib = bucket{} + ab := (*bucket)(atomic.SwapPointer(&cc.activeBucket, unsafe.Pointer(ib))) + cc.inactiveBucket = &bucket{ + numSuccesses: atomic.LoadUint32(&ab.numSuccesses), + numFailures: atomic.LoadUint32(&ab.numFailures), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 000000000..196a562ed --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,240 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "encoding/json" + "time" + + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type successRateEjection SuccessRateEjection + +// UnmarshalJSON unmarshals JSON into SuccessRateEjection. If a +// SuccessRateEjection field is not set, that field will get its default value. +func (sre *SuccessRateEjection) UnmarshalJSON(j []byte) error { + sre.StdevFactor = 1900 + sre.EnforcementPercentage = 100 + sre.MinimumHosts = 5 + sre.RequestVolume = 100 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*successRateEjection)(sre)) +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. 
+func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. + Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type failurePercentageEjection FailurePercentageEjection + +// UnmarshalJSON unmarshals JSON into FailurePercentageEjection. If a +// FailurePercentageEjection field is not set, that field will get its default +// value. +func (fpe *FailurePercentageEjection) UnmarshalJSON(j []byte) error { + fpe.Threshold = 85 + fpe.EnforcementPercentage = 0 + fpe.MinimumHosts = 5 + fpe.RequestVolume = 50 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*failurePercentageEjection)(fpe)) +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval iserviceconfig.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type lbConfig LBConfig + +// UnmarshalJSON unmarshals JSON into LBConfig. If a top level LBConfig field +// (i.e. not next layer sre or fpe) is not set, that field will get its default +// value. If sre or fpe is not set, it will stay unset, otherwise it will +// unmarshal on those types populating with default values for their fields if +// needed. +func (lbc *LBConfig) UnmarshalJSON(j []byte) error { + // Default top layer values as documented in A50. + lbc.Interval = iserviceconfig.Duration(10 * time.Second) + lbc.BaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + lbc.MaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + lbc.MaxEjectionPercent = 10 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*lbConfig)(lbc)) +} + +// EqualIgnoringChildPolicy returns whether the LBConfig is same with the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. 
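// As an illustration of the defaulting above, a config such as the following
// (duration fields shown as proto-style strings like "60s", which is an
// assumption about the JSON form of iserviceconfig.Duration rather than
// something defined in this file):
//
//	{
//	  "interval": "60s",
//	  "successRateEjection": {"minimumHosts": 3}
//	}
//
// would unmarshal with Interval = 60s, BaseEjectionTime = 30s,
// MaxEjectionTime = 300s, MaxEjectionPercent = 10, SuccessRateEjection set to
// {StdevFactor: 1900, EnforcementPercentage: 100, MinimumHosts: 3,
// RequestVolume: 100}, and FailurePercentageEjection left nil because it was
// absent from the JSON.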
+func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go new file mode 100644 index 000000000..705b0cb69 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package outlierdetection + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[outlier-detection-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *outlierDetectionBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go new file mode 100644 index 000000000..0fa422d8f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "fmt" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/resolver" +) + +// subConnWrapper wraps every created SubConn in the Outlier Detection Balancer, +// to help track the latest state update from the underlying SubConn, and also +// whether or not this SubConn is ejected. +type subConnWrapper struct { + balancer.SubConn + listener func(balancer.SubConnState) + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. 
+ addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. + latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded + + // addresses is the list of address(es) this SubConn was created with to + // help support any change in address(es) + addresses []resolver.Address +} + +// eject causes the wrapper to report a state update with the TRANSIENT_FAILURE +// state, and to stop passing along updates from the underlying subchannel. +func (scw *subConnWrapper) eject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: true, + }) +} + +// uneject causes the wrapper to report a state update with the latest update +// from the underlying subchannel, and resume passing along updates from the +// underlying subchannel. +func (scw *subConnWrapper) uneject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: false, + }) +} + +func (scw *subConnWrapper) String() string { + return fmt.Sprintf("%+v", scw.addresses) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go new file mode 100644 index 000000000..c17c62f23 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -0,0 +1,289 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package priority implements the priority balancer. +// +// This balancer will be kept in internal until we use it in the xds balancers, +// and are confident its functionalities are stable. It will then be exported +// for more users. +package priority + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the priority balancer. +const Name = "priority_experimental" + +// DefaultSubBalancerCloseTimeout is defined as a variable instead of const for +// testing. 
+var DefaultSubBalancerCloseTimeout = 15 * time.Minute + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &priorityBalancer{ + cc: cc, + done: grpcsync.NewEvent(), + children: make(map[string]*childBalancer), + childBalancerStateUpdate: buffer.NewUnbounded(), + } + + b.logger = prefixLogger(b) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b, + Logger: b.logger, + SubBalancerCloseTimeout: DefaultSubBalancerCloseTimeout, + }) + b.bg.Start() + go b.run() + b.logger.Infof("Created") + return b +} + +func (b bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(s) +} + +func (bb) Name() string { + return Name +} + +// timerWrapper wraps a timer with a boolean. So that when a race happens +// between AfterFunc and Stop, the func is guaranteed to not execute. +type timerWrapper struct { + stopped bool + timer *time.Timer +} + +type priorityBalancer struct { + logger *grpclog.PrefixLogger + cc balancer.ClientConn + bg *balancergroup.BalancerGroup + done *grpcsync.Event + childBalancerStateUpdate *buffer.Unbounded + + mu sync.Mutex + childInUse string + // priorities is a list of child names from higher to lower priority. + priorities []string + // children is a map from child name to sub-balancers. + children map[string]*childBalancer + + // Set during UpdateClientConnState when calling into sub-balancers. + // Prevents child updates from recomputing the active priority or sending + // an update of the aggregated picker to the parent. Cleared after all + // sub-balancers have finished UpdateClientConnState, after which + // syncPriority is called manually. + inhibitPickerUpdates bool +} + +func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.logger.V(2) { + b.logger.Infof("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + b.mu.Lock() + // Create and remove children, since we know all children from the config + // are used by some priority. + for name, newSubConfig := range newConfig.Children { + bb := balancer.Get(newSubConfig.Config.Name) + if bb == nil { + b.logger.Errorf("balancer name %v from config is not registered", newSubConfig.Config.Name) + continue + } + + currentChild, ok := b.children[name] + if !ok { + // This is a new child, add it to the children list. But note that + // the balancer isn't built, because this child can be a low + // priority. If necessary, it will be built when syncing priorities. + cb := newChildBalancer(name, b, bb.Name(), b.cc) + cb.updateConfig(newSubConfig, resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }) + b.children[name] = cb + continue + } + + // This is not a new child. But the config/addresses could change. + + // The balancing policy name is changed, close the old child. But don't + // rebuild, rebuild will happen when syncing priorities. 
+ if currentChild.balancerName != bb.Name() { + currentChild.stop() + currentChild.updateBalancerName(bb.Name()) + } + + // Update config and address, but note that this doesn't send the + // updates to non-started child balancers (the child balancer might not + // be built, if it's a low priority). + currentChild.updateConfig(newSubConfig, resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }) + } + // Cleanup resources used by children removed from the config. + for name, oldChild := range b.children { + if _, ok := newConfig.Children[name]; !ok { + oldChild.stop() + delete(b.children, name) + } + } + + // Update priorities and handle priority changes. + b.priorities = newConfig.Priorities + + // Everything was removed by the update. + if len(b.priorities) == 0 { + b.childInUse = "" + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), + }) + b.mu.Unlock() + return nil + } + + // This will sync the states of all children to the new updated + // priorities. Includes starting/stopping child balancers when necessary. + // Block picker updates until all children have had a chance to call + // UpdateState to prevent races where, e.g., the active priority reports + // transient failure but a higher priority may have reported something that + // made it active, and if the transient failure update is handled first, + // RPCs could fail. + b.inhibitPickerUpdates = true + // Add an item to queue to notify us when the current items in the queue + // are done and syncPriority has been called. + done := make(chan struct{}) + b.childBalancerStateUpdate.Put(resumePickerUpdates{done: done}) + b.mu.Unlock() + <-done + + return nil +} + +func (b *priorityBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *priorityBalancer) Close() { + b.bg.Close() + b.childBalancerStateUpdate.Close() + + b.mu.Lock() + defer b.mu.Unlock() + b.done.Fire() + // Clear states of the current child in use, so if there's a race in picker + // update, it will be dropped. + b.childInUse = "" + // Stop the child policies, this is necessary to stop the init timers in the + // children. + for _, child := range b.children { + child.stop() + } +} + +func (b *priorityBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// UpdateState implements balancergroup.BalancerStateAggregator interface. The +// balancer group sends new connectivity state and picker here. +func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { + b.childBalancerStateUpdate.Put(childBalancerState{ + name: childName, + s: state, + }) +} + +type childBalancerState struct { + name string + s balancer.State +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// run handles child update in a separate goroutine, so if the child sends +// updates inline (when called by parent), it won't cause deadlocks (by trying +// to hold the same mutex). +func (b *priorityBalancer) run() { + for { + select { + case u, ok := <-b.childBalancerStateUpdate.Get(): + if !ok { + return + } + b.childBalancerStateUpdate.Load() + // Needs to handle state update in a goroutine, because each state + // update needs to start/close child policy, could result in + // deadlock. 
+ b.mu.Lock() + if b.done.HasFired() { + b.mu.Unlock() + return + } + switch s := u.(type) { + case childBalancerState: + b.handleChildStateUpdate(s.name, s.s) + case resumePickerUpdates: + b.inhibitPickerUpdates = false + b.syncPriority(b.childInUse) + close(s.done) + } + b.mu.Unlock() + case <-b.done.Done(): + return + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go new file mode 100644 index 000000000..7e8ccbd33 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +type childBalancer struct { + name string + parent *priorityBalancer + parentCC balancer.ClientConn + balancerName string + cc *ignoreResolveNowClientConn + + ignoreReresolutionRequests bool + config serviceconfig.LoadBalancingConfig + rState resolver.State + + started bool + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + // The latest state the child balancer provided. + state balancer.State + // The timer to give a priority some time to connect. And if the priority + // doesn't go into Ready/Failure, the next priority will be started. + initTimer *timerWrapper +} + +// newChildBalancer creates a child balancer place holder, but doesn't +// build/start the child balancer. +func newChildBalancer(name string, parent *priorityBalancer, balancerName string, cc balancer.ClientConn) *childBalancer { + return &childBalancer{ + name: name, + parent: parent, + parentCC: cc, + balancerName: balancerName, + cc: newIgnoreResolveNowClientConn(cc, false), + started: false, + // Start with the connecting state and picker with re-pick error, so + // that when a priority switch causes this child picked before it's + // balancing policy is created, a re-pick will happen. + state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + } +} + +// updateBalancerName updates balancer name for the child, but doesn't build a +// new one. The parent priority LB always closes the child policy before +// updating the balancer name, and the new balancer is built when it gets added +// to the balancergroup as part of start(). 
+func (cb *childBalancer) updateBalancerName(balancerName string) { + cb.balancerName = balancerName + cb.cc = newIgnoreResolveNowClientConn(cb.parentCC, cb.ignoreReresolutionRequests) +} + +// updateConfig sets childBalancer's config and state, but doesn't send update to +// the child balancer unless it is started. +func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { + cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests + cb.config = child.Config.Config + cb.rState = rState + if cb.started { + cb.sendUpdate() + } +} + +// start builds the child balancer if it's not already started. +// +// It doesn't do it directly. It asks the balancer group to build it. +func (cb *childBalancer) start() { + if cb.started { + return + } + cb.started = true + cb.parent.bg.AddWithClientConn(cb.name, cb.balancerName, cb.cc) + cb.startInitTimer() + cb.sendUpdate() +} + +// sendUpdate sends the addresses and config to the child balancer. +func (cb *childBalancer) sendUpdate() { + cb.cc.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) + // TODO: return and aggregate the returned error in the parent. + err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{ + ResolverState: cb.rState, + BalancerConfig: cb.config, + }) + if err != nil { + cb.parent.logger.Warningf("Failed to update state for child policy %q: %v", cb.name, err) + } +} + +// stop stops the child balancer and resets the state. +// +// It doesn't do it directly. It asks the balancer group to remove it. +// +// Note that the underlying balancer group could keep the child in a cache. +func (cb *childBalancer) stop() { + if !cb.started { + return + } + cb.stopInitTimer() + cb.parent.bg.Remove(cb.name) + cb.started = false + cb.state = balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. + cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. + cb.parent.syncPriority("") + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go new file mode 100644 index 000000000..0be807c13 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "errors" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +var ( + // ErrAllPrioritiesRemoved is returned by the picker when there's no priority available. + ErrAllPrioritiesRemoved = errors.New("no priority is provided, all priorities are removed") + // DefaultPriorityInitTimeout is the timeout after which if a priority is + // not READY, the next will be started. It's exported to be overridden by + // tests. + DefaultPriorityInitTimeout = 10 * time.Second +) + +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). +// +// It's guaranteed that after this function returns: +// +// If some child is READY, it is childInUse, and all lower priorities are +// closed. +// +// If some child is newly started(in Connecting for the first time), it is +// childInUse, and all lower priorities are closed. +// +// Otherwise, the lowest priority is childInUse (none of the children is +// ready, and the overall state is not ready). +// +// Steps: +// +// If all priorities were deleted, unset childInUse (to an empty string), and +// set parent ClientConn to TransientFailure +// +// Otherwise, Scan all children from p0, and check balancer stats: +// +// For any of the following cases: +// +// If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// +// If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// +// If balancer is READY or IDLE +// +// If this is the lowest priority +// +// do the following: +// +// if this is not the old childInUse, override picker so old picker is no +// longer used. +// +// switch to it (because all higher priorities are neither new or Ready) +// +// forward the new addresses and config +// +// Caller must hold b.mu. +func (b *priorityBalancer) syncPriority(childUpdating string) { + if b.inhibitPickerUpdates { + if b.logger.V(2) { + b.logger.Infof("Skipping update from child policy %q", childUpdating) + } + return + } + for p, name := range b.priorities { + child, ok := b.children[name] + if !ok { + b.logger.Warningf("Priority name %q is not found in list of child policies", name) + continue + } + + if !child.started || + child.state.ConnectivityState == connectivity.Ready || + child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || + p == len(b.priorities)-1 { + if b.childInUse != child.name || child.name == childUpdating { + if b.logger.V(2) { + b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + } + // If we switch children or the child in use just updated its + // picker, push the child's picker to the parent. 
+ b.cc.UpdateState(child.state) + } + if b.logger.V(2) { + b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p) + } + b.switchToChild(child, p) + break + } + } +} + +// Stop priorities [p+1, lowest]. +// +// Caller must hold b.mu. +func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { + for i := p + 1; i < len(b.priorities); i++ { + name := b.priorities[i] + child, ok := b.children[name] + if !ok { + b.logger.Warningf("Priority name %q is not found in list of child policies", name) + continue + } + child.stop() + } +} + +// switchToChild does the following: +// - stop all child with lower priorities +// - if childInUse is not this child +// - set childInUse to this child +// - if this child is not started, start it +// +// Note that it does NOT send the current child state (picker) to the parent +// ClientConn. The caller needs to send it if necessary. +// +// this can be called when +// 1. first update, start p0 +// 2. an update moves a READY child from a lower priority to higher +// 2. a different builder is updated for this child +// 3. a high priority goes Failure, start next +// 4. a high priority init timeout, start next +// +// Caller must hold b.mu. +func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { + // Stop lower priorities even if childInUse is same as this child. It's + // possible this child was moved from a priority to another. + b.stopSubBalancersLowerThanPriority(priority) + + // If this child is already in use, do nothing. + // + // This can happen: + // - all priorities are not READY, an config update always triggers switch + // to the lowest. In this case, the lowest child could still be connecting, + // so we don't stop the init timer. + // - a high priority is READY, an config update always triggers switch to + // it. + if b.childInUse == child.name && child.started { + return + } + b.childInUse = child.name + + if !child.started { + child.start() + } +} + +// handleChildStateUpdate start/close priorities based on the connectivity +// state. +func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) { + // Update state in child. The updated picker will be sent to parent later if + // necessary. + child, ok := b.children[childName] + if !ok { + b.logger.Warningf("Child policy not found for %q", childName) + return + } + if !child.started { + b.logger.Warningf("Ignoring update from child policy %q which is not in started state: %+v", childName, s) + return + } + child.state = s + + // We start/stop the init timer of this child based on the new connectivity + // state. syncPriority() later will need the init timer (to check if it's + // nil or not) to decide which child to switch to. + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + child.reportedTF = false + child.stopInitTimer() + case connectivity.TransientFailure: + child.reportedTF = true + child.stopInitTimer() + case connectivity.Connecting: + if !child.reportedTF { + child.startInitTimer() + } + default: + // New state is Shutdown, should never happen. Don't forward. + } + + child.parent.syncPriority(childName) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go new file mode 100644 index 000000000..37f1c9a82 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "encoding/json" + "fmt" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Child is a child of priority balancer. +type Child struct { + Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` + IgnoreReresolutionRequests bool `json:"ignoreReresolutionRequests,omitempty"` +} + +// LBConfig represents priority balancer's config. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Children is a map from the child balancer names to their configs. Child + // names can be found in field Priorities. + Children map[string]*Child `json:"children,omitempty"` + // Priorities is a list of child balancer names. They are sorted from + // highest priority to low. The type/config for each child can be found in + // field Children, with the balancer name as the key. + Priorities []string `json:"priorities,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + + prioritiesSet := make(map[string]bool) + for _, name := range cfg.Priorities { + if _, ok := cfg.Children[name]; !ok { + return nil, fmt.Errorf("LB policy name %q found in Priorities field (%v) is not found in Children field (%+v)", name, cfg.Priorities, cfg.Children) + } + prioritiesSet[name] = true + } + for name := range cfg.Children { + if _, ok := prioritiesSet[name]; !ok { + return nil, fmt.Errorf("LB policy name %q found in Children field (%v) is not found in Priorities field (%+v)", name, cfg.Children, cfg.Priorities) + } + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go new file mode 100644 index 000000000..792ee4b3f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// ignoreResolveNowClientConn wraps a balancer.ClientConn and overrides the +// ResolveNow() method to ignore those calls if the ignoreResolveNow bit is set. 
+type ignoreResolveNowClientConn struct { + balancer.ClientConn + ignoreResolveNow *uint32 +} + +func newIgnoreResolveNowClientConn(cc balancer.ClientConn, ignore bool) *ignoreResolveNowClientConn { + ret := &ignoreResolveNowClientConn{ + ClientConn: cc, + ignoreResolveNow: new(uint32), + } + ret.updateIgnoreResolveNow(ignore) + return ret +} + +func (i *ignoreResolveNowClientConn) updateIgnoreResolveNow(b bool) { + if b { + atomic.StoreUint32(i.ignoreResolveNow, 1) + return + } + atomic.StoreUint32(i.ignoreResolveNow, 0) + +} + +func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) { + if atomic.LoadUint32(i.ignoreResolveNow) != 0 { + return + } + i.ClientConn.ResolveNow(o) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go new file mode 100644 index 000000000..2fb8d2d20 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[priority-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *priorityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go new file mode 100644 index 000000000..45fbe7644 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go new file mode 100644 index 000000000..b4afcf100 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/serviceconfig" +) + +// LBConfig is the balancer config for ring_hash balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + MinRingSize uint64 `json:"minRingSize,omitempty"` + MaxRingSize uint64 `json:"maxRingSize,omitempty"` +} + +const ( + defaultMinSize = 1024 + defaultMaxSize = 4096 + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + if cfg.MinRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("min_ring_size value of %d is greater than max supported value %d for this field", cfg.MinRingSize, ringHashSizeUpperBound) + } + if cfg.MaxRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("max_ring_size value of %d is greater than max supported value %d for this field", cfg.MaxRingSize, ringHashSizeUpperBound) + } + if cfg.MinRingSize == 0 { + cfg.MinRingSize = defaultMinSize + } + if cfg.MaxRingSize == 0 { + cfg.MaxRingSize = defaultMaxSize + } + if cfg.MinRingSize > cfg.MaxRingSize { + return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) + } + if cfg.MinRingSize > envconfig.RingHashCap { + cfg.MinRingSize = envconfig.RingHashCap + } + if cfg.MaxRingSize > envconfig.RingHashCap { + cfg.MaxRingSize = envconfig.RingHashCap + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go new file mode 100644 index 000000000..3e0f0adf5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[ring-hash-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} + +func subConnPrefixLogger(p *ringhashBalancer, sc *subConn) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)+fmt.Sprintf("[subConn %p] ", sc)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go new file mode 100644 index 000000000..5ce72cade --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -0,0 +1,161 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/status" +) + +type picker struct { + ring *ring + logger *grpclog.PrefixLogger + subConnStates map[*subConn]connectivity.State +} + +func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { + states := make(map[*subConn]connectivity.State) + for _, e := range ring.items { + states[e.sc] = e.sc.effectiveState() + } + return &picker{ring: ring, logger: logger, subConnStates: states} +} + +// handleRICSResult is the return type of handleRICS. It's needed to wrap the +// returned error from Pick() in a struct. With this, if the return values are +// `balancer.PickResult, error, bool`, linter complains because error is not the +// last return value. +type handleRICSResult struct { + pr balancer.PickResult + err error +} + +// handleRICS generates pick result if the entry is in Ready, Idle, Connecting +// or Shutdown. TransientFailure will be handled specifically after this +// function returns. +// +// The first return value indicates if the state is in Ready, Idle, Connecting +// or Shutdown. If it's true, the PickResult and error should be returned from +// Pick() as is. +func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { + switch state := p.subConnStates[e.sc]; state { + case connectivity.Ready: + return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true + case connectivity.Idle: + // Trigger Connect() and queue the pick. + e.sc.queueConnect() + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.Connecting: + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.TransientFailure: + // Return ok==false, so TransientFailure will be handled afterwards. + return handleRICSResult{}, false + case connectivity.Shutdown: + // Shutdown can happen in a race where the old picker is called. 
A new + // picker should already be sent. + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + default: + // Should never reach this. All the connectivity states are already + // handled in the cases. + p.logger.Errorf("SubConn has undefined connectivity state: %v", state) + return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true + } +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + e := p.ring.pick(getRequestHash(info.Ctx)) + if hr, ok := p.handleRICS(e); ok { + return hr.pr, hr.err + } + // ok was false, the entry is in transient failure. + return p.handleTransientFailure(e) +} + +func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, error) { + // Queue a connect on the first picked SubConn. + e.sc.queueConnect() + + // Find next entry in the ring, skipping duplicate SubConns. + e2 := nextSkippingDuplicates(p.ring, e) + if e2 == nil { + // There's no next entry available, fail the pick. + return balancer.PickResult{}, fmt.Errorf("the only SubConn is in Transient Failure") + } + + // For the second SubConn, also check Ready/Idle/Connecting as if it's the + // first entry. + if hr, ok := p.handleRICS(e2); ok { + return hr.pr, hr.err + } + + // The second SubConn is also in TransientFailure. Queue a connect on it. + e2.sc.queueConnect() + + // If it gets here, this is after the second SubConn, and the second SubConn + // was in TransientFailure. + // + // Loop over all other SubConns: + // - If all SubConns so far are all TransientFailure, trigger Connect() on + // the TransientFailure SubConns, and keep going. + // - If there's one SubConn that's not in TransientFailure, keep checking + // the remaining SubConns (in case there's a Ready, which will be returned), + // but don't not trigger Connect() on the other SubConns. + var firstNonFailedFound bool + for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { + scState := p.subConnStates[ee.sc] + if scState == connectivity.Ready { + return balancer.PickResult{SubConn: ee.sc.sc}, nil + } + if firstNonFailedFound { + continue + } + if scState == connectivity.TransientFailure { + // This will queue a connect. + ee.sc.queueConnect() + continue + } + // This is a SubConn in a non-failure state. We continue to check the + // other SubConns, but remember that there was a non-failed SubConn + // seen. After this, Pick() will never trigger any SubConn to Connect(). + firstNonFailedFound = true + if scState == connectivity.Idle { + // This is the first non-failed SubConn, and it is in a real Idle + // state. Trigger it to Connect(). + ee.sc.queueConnect() + } + } + return balancer.PickResult{}, fmt.Errorf("no connection is Ready") +} + +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. +func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { + for next := ring.next(entry); next != entry; next = ring.next(next) { + if next.sc != entry.sc { + return next + } + } + // There's no qualifying next entry. + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go new file mode 100644 index 000000000..45dbb2d2a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -0,0 +1,182 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "math" + "sort" + "strconv" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/resolver" +) + +type ring struct { + items []*ringEntry +} + +type subConnWithWeight struct { + sc *subConn + weight float64 +} + +type ringEntry struct { + idx int + hash uint64 + sc *subConn +} + +// newRing creates a ring from the subConns stored in the AddressMap. The ring +// size is limited by the passed in max/min. +// +// ring entries will be created for each subConn, and subConn with high weight +// (specified by the address) may have multiple entries. +// +// For example, for subConns with weights {a:3, b:3, c:4}, a generated ring of +// size 10 could be: +// - {idx:0 hash:3689675255460411075 b} +// - {idx:1 hash:4262906501694543955 c} +// - {idx:2 hash:5712155492001633497 c} +// - {idx:3 hash:8050519350657643659 b} +// - {idx:4 hash:8723022065838381142 b} +// - {idx:5 hash:11532782514799973195 a} +// - {idx:6 hash:13157034721563383607 c} +// - {idx:7 hash:14468677667651225770 c} +// - {idx:8 hash:17336016884672388720 a} +// - {idx:9 hash:18151002094784932496 a} +// +// To pick from a ring, a binary search will be done for the given target hash, +// and first item with hash >= given hash will be returned. +// +// Must be called with a non-empty subConns map. +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { + if logger.V(2) { + logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + } + + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 + normalizedWeights, minWeight := normalizeWeights(subConns) + if logger.V(2) { + logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights) + } + + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. + + // Scale up the size of the ring such that the least-weighted host gets a + // whole number of hashes on the ring. + // + // Note that size is limited by the input max/min. + scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) + ringSize := math.Ceil(scale) + items := make([]*ringEntry, 0, int(ringSize)) + if logger.V(2) { + logger.Infof("newRing: creating new ring of size %v", ringSize) + } + + // For each entry, scale*weight nodes are generated in the ring. + // + // Not all of these are whole numbers. E.g. for weights {a:3,b:3,c:4}, if + // ring size is 7, scale is 6.66. The numbers of nodes will be + // {a,a,b,b,c,c,c}. + // + // A hash is generated for each item, and later the results will be sorted + // based on the hash. 
+ var currentHashes, targetHashes float64 + for _, scw := range normalizedWeights { + targetHashes += scale * scw.weight + // This index ensures that ring entries corresponding to the same + // address hash to different values. And since this index is + // per-address, these entries hash to the same value across address + // updates. + idx := 0 + for currentHashes < targetHashes { + h := xxhash.Sum64String(scw.sc.addr + "_" + strconv.Itoa(idx)) + items = append(items, &ringEntry{hash: h, sc: scw.sc}) + idx++ + currentHashes++ + } + } + + // Sort items based on hash, to prepare for binary search. + sort.Slice(items, func(i, j int) bool { return items[i].hash < items[j].hash }) + for i, ii := range items { + ii.idx = i + } + return &ring{items: items} +} + +// normalizeWeights calculates the normalized weights for each subConn in the +// given subConns map. It returns a slice of subConnWithWeight structs, where +// each struct contains a subConn and its corresponding weight. The function +// also returns the minimum weight among all subConns. +// +// The normalized weight of each subConn is calculated by dividing its weight +// attribute by the sum of all subConn weights. If the weight attribute is not +// found on the address, a default weight of 1 is used. +// +// The addresses are sorted in ascending order to ensure consistent results. +// +// Must be called with a non-empty subConns map. +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { + var weightSum uint32 + // Since attributes are explicitly ignored in the AddressMap key, we need to + // iterate over the values to get the weights. + scVals := subConns.Values() + for _, a := range scVals { + weightSum += a.(*subConn).weight + } + ret := make([]subConnWithWeight, 0, subConns.Len()) + min := 1.0 + for _, a := range scVals { + scInfo := a.(*subConn) + // (*subConn).weight is set to 1 if the weight attribute is not found on + // the address. And since this function is guaranteed to be called with + // a non-empty subConns map, weightSum is guaranteed to be non-zero. So, + // we need not worry about divide by zero error here. + nw := float64(scInfo.weight) / float64(weightSum) + ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) + min = math.Min(min, nw) + } + // Sort the addresses to return consistent results. + // + // Note: this might not be necessary, but this makes sure the ring is + // consistent as long as the addresses are the same, for example, in cases + // where an address is added and then removed, the RPCs will still pick the + // same old SubConn. + sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) + return ret, min +} + +// pick does a binary search. It returns the item with smallest index i that +// r.items[i].hash >= h. +func (r *ring) pick(h uint64) *ringEntry { + i := sort.Search(len(r.items), func(i int) bool { return r.items[i].hash >= h }) + if i == len(r.items) { + // If not found, and h is greater than the largest hash, return the + // first item. + i = 0 + } + return r.items[i] +} + +// next returns the next entry. 
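// pick above relies on the ring items being sorted by hash: sort.Search finds
// the first entry with hash >= h, and an out-of-range result wraps to index 0.
// A standalone sketch of that lookup, with a hypothetical helper that is not
// part of this package:
//
//	package main
//
//	import (
//		"fmt"
//		"sort"
//	)
//
//	// pickIndex returns the index of the first hash >= h, wrapping to 0 when
//	// h is greater than every hash on the ring.
//	func pickIndex(hashes []uint64, h uint64) int {
//		i := sort.Search(len(hashes), func(i int) bool { return hashes[i] >= h })
//		if i == len(hashes) {
//			return 0
//		}
//		return i
//	}
//
//	func main() {
//		ring := []uint64{10, 20, 30} // sorted, as produced by newRing
//		fmt.Println(pickIndex(ring, 5))  // 0 (first hash >= 5 is 10)
//		fmt.Println(pickIndex(ring, 20)) // 1 (20 >= 20)
//		fmt.Println(pickIndex(ring, 31)) // 0 (wraps past the largest hash)
//	}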
+func (r *ring) next(e *ringEntry) *ringEntry { + return r.items[(e.idx+1)%len(r.items)] +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go new file mode 100644 index 000000000..ef054d48a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -0,0 +1,527 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash implements the ringhash balancer. +package ringhash + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the ring_hash balancer. +const Name = "ring_hash_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &ringhashBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + orderedSubConns: make([]*subConn, 0), + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type subConn struct { + addr string + weight uint32 + sc balancer.SubConn + logger *grpclog.PrefixLogger + + mu sync.RWMutex + // This is the actual state of this SubConn (as updated by the ClientConn). + // The effective state can be different, see comment of attemptedToConnect. + state connectivity.State + // failing is whether this SubConn is in a failing state. A subConn is + // considered to be in a failing state if it was previously in + // TransientFailure. + // + // This affects the effective connectivity state of this SubConn, e.g. + // - if the actual state is Idle or Connecting, but this SubConn is failing, + // the effective state is TransientFailure. + // + // This is used in pick(). E.g. if a subConn is Idle, but has failing as + // true, pick() will + // - consider this SubConn as TransientFailure, and check the state of the + // next SubConn. + // - trigger Connect() (note that normally a SubConn in real + // TransientFailure cannot Connect()) + // + // A subConn starts in non-failing (failing is false). A transition to + // TransientFailure sets failing to true (and it stays true). A transition + // to Ready sets failing to false. + failing bool + // connectQueued is true if a Connect() was queued for this SubConn while + // it's not in Idle (most likely was in TransientFailure). 
A Connect() will + // be triggered on this SubConn when it turns Idle. + // + // When connectivity state is updated to Idle for this SubConn, if + // connectQueued is true, Connect() will be called on the SubConn. + connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool +} + +// setState updates the state of this SubConn. +// +// It also handles the queued Connect(). If the new state is Idle, and a +// Connect() was queued, this SubConn will be triggered to Connect(). +func (sc *subConn) setState(s connectivity.State) { + sc.mu.Lock() + defer sc.mu.Unlock() + switch s { + case connectivity.Idle: + // Trigger Connect() if new state is Idle, and there is a queued connect. + if sc.connectQueued { + sc.connectQueued = false + sc.logger.Infof("Executing a queued connect for subConn moving to state: %v", sc.state) + sc.sc.Connect() + } else { + sc.attemptingToConnect = false + } + case connectivity.Connecting: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + case connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + sc.attemptingToConnect = false + // Set to a non-failing state. + sc.failing = false + case connectivity.TransientFailure: + // Set to a failing state. + sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false + } + sc.state = s +} + +// effectiveState returns the effective state of this SubConn. It can be +// different from the actual state, e.g. Idle while the subConn is failing is +// considered TransientFailure. Read comment of field failing for other cases. +func (sc *subConn) effectiveState() connectivity.State { + sc.mu.RLock() + defer sc.mu.RUnlock() + if sc.failing && (sc.state == connectivity.Idle || sc.state == connectivity.Connecting) { + return connectivity.TransientFailure + } + return sc.state +} + +// queueConnect sets a boolean so that when the SubConn state changes to Idle, +// it's Connect() will be triggered. If the SubConn state is already Idle, it +// will just call Connect(). +func (sc *subConn) queueConnect() { + sc.mu.Lock() + defer sc.mu.Unlock() + sc.attemptingToConnect = true + if sc.state == connectivity.Idle { + sc.logger.Infof("Executing a queued connect for subConn in state: %v", sc.state) + sc.sc.Connect() + return + } + // Queue this connect, and when this SubConn switches back to Idle (happens + // after backoff in TransientFailure), it will Connect(). + sc.logger.Infof("Queueing a connect for subConn in state: %v", sc.state) + sc.connectQueued = true +} + +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + +type ringhashBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + config *LBConfig + subConns *resolver.AddressMap // Map from resolver.Address to `*subConn`. + scStates map[balancer.SubConn]*subConn + + // ring is always in sync with subConns. When subConns change, a new ring is + // generated. Note that address weights updates (they are keys in the + // subConns map) also regenerates the ring. 
+ ring *ring + picker balancer.Picker + csEvltr *connectivityStateEvaluator + state connectivity.State + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + + // orderedSubConns contains the list of subconns in the order that addresses + // appear from the resolver. Together with lastInternallyTriggeredSCIndex, + // this allows triggering connection attempts to all SubConns independently + // of the order they appear on the ring. Always in sync with ring and + // subConns. The index is reset when addresses change. + orderedSubConns []*subConn + lastInternallyTriggeredSCIndex int +} + +// updateAddresses creates new SubConns and removes SubConns, based on the +// address update. +// +// The return value is whether the new address list is different from the +// previous. True if +// - an address was added +// - an address was removed +// - an address's weight was updated +// +// Note that this function doesn't trigger SubConn connecting, so all the new +// SubConn states are Idle. +func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { + var addrsUpdated bool + // addrsSet is the set converted from addrs, used for quick lookup. + addrsSet := resolver.NewAddressMap() + + b.orderedSubConns = b.orderedSubConns[:0] // reuse the underlying array. + + for _, addr := range addrs { + addrsSet.Set(addr, true) + newWeight := getWeightAttribute(addr) + if val, ok := b.subConns.Get(addr); !ok { + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: true, + StateListener: func(state balancer.SubConnState) { b.updateSubConnState(sc, state) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + b.logger.Warningf("Failed to create new SubConn: %v", err) + continue + } + scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} + scs.logger = subConnPrefixLogger(b, scs) + scs.setState(connectivity.Idle) + b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) + b.subConns.Set(addr, scs) + b.scStates[sc] = scs + b.orderedSubConns = append(b.orderedSubConns, scs) + addrsUpdated = true + } else { + // We have seen this address before and created a subConn for it. If the + // weight associated with the address has changed, update the subConns map + // with the new weight. This will be used when a new ring is created. + // + // There is no need to call UpdateAddresses on the subConn at this point + // since *only* the weight attribute has changed, and that does not affect + // subConn uniqueness. + scInfo := val.(*subConn) + b.orderedSubConns = append(b.orderedSubConns, scInfo) + if oldWeight := scInfo.weight; oldWeight != newWeight { + scInfo.weight = newWeight + b.subConns.Set(addr, scInfo) + // Return true to force recreation of the ring. + addrsUpdated = true + } + } + } + for _, addr := range b.subConns.Keys() { + // addr was removed by resolver. + if _, ok := addrsSet.Get(addr); !ok { + v, _ := b.subConns.Get(addr) + scInfo := v.(*subConn) + scInfo.sc.Shutdown() + b.subConns.Delete(addr) + addrsUpdated = true + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in updateSubConnState. 
+ } + } + if addrsUpdated { + b.lastInternallyTriggeredSCIndex = 0 + } + return addrsUpdated +} + +func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // If addresses were updated, whether it resulted in SubConn + // creation/deletion, or just weight update, we need to regenerate the ring + // and send a new picker. + regenerateRing := b.updateAddresses(s.ResolverState.Addresses) + + // If the ring configuration has changed, we need to regenerate the ring and + // send a new picker. + if b.config == nil || b.config.MinRingSize != newConfig.MinRingSize || b.config.MaxRingSize != newConfig.MaxRingSize { + regenerateRing = true + } + b.config = newConfig + + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also record this as a resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + if regenerateRing { + // Ring creation is guaranteed to not fail because we call newRing() + // with a non-empty subConns map. + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize, b.logger) + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // Successful resolution; clear resolver error and return nil. + b.resolverErr = nil + return nil +} + +func (b *ringhashBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// updateSubConnState updates the per-SubConn state stored in the ring, and also +// the aggregated state. +// +// It triggers an update to cc when: +// - the new state is TransientFailure, to update the error message +// - it's possible that this is a noop, but sending an extra update is easier +// than comparing errors +// +// - the aggregated state is changed +// - the same picker will be sent again, but this update may trigger a re-pick +// for some RPCs.
+func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) + } + scs, ok := b.scStates[sc] + if !ok { + b.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) + return + } + oldSCState := scs.effectiveState() + scs.setState(s) + newSCState := scs.effectiveState() + b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) + + b.state = b.csEvltr.recordTransition(oldSCState, newSCState) + + switch s { + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + if oldSCState != newSCState { + // Because the picker caches the state of the subconns, we always + // regenerate and update the picker when the effective SubConn state + // changes. + b.regeneratePicker() + b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, v := range b.subConns.Values() { + sc := v.(*subConn) + if sc.isAttemptingToConnect() { + return + } + } + + // Trigger a SubConn (the next in the order addresses appear in the + // resolver) to connect if nobody is attempting to connect. + b.lastInternallyTriggeredSCIndex = (b.lastInternallyTriggeredSCIndex + 1) % len(b.orderedSubConns) + b.orderedSubConns[b.lastInternallyTriggeredSCIndex].queueConnect() + } +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *ringhashBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. 
+ if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *ringhashBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = base.NewErrPicker(b.mergeErrors()) + return + } + b.picker = newPicker(b.ring, b.logger) +} + +func (b *ringhashBalancer) Close() { + b.logger.Infof("Shutdown") +} + +func (b *ringhashBalancer) ExitIdle() { + // ExitIdle implementation is a no-op because connections are either + // triggered from picks or from subConn state changes. +} + +// connectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type connectivityStateEvaluator struct { + sum uint64 + nums [5]uint64 +} + +// recordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If there is at least one subchannel in READY state, report READY. +// - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. +// - If there is at least one subchannel in CONNECTING state, report CONNECTING. +// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. +// - If there is at least one subchannel in Idle state, report Idle. +// - Otherwise, report TRANSIENT_FAILURE. +// +// Note that if there is one subchannel in CONNECTING and two in +// TRANSIENT_FAILURE, the overall state is TRANSIENT_FAILURE. This is because +// the second transient failure is a fallback of the first failing SubConn, and +// we want to report transient failure to failover to the lower priority. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + cse.nums[state] += updateVal + } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } + + if cse.nums[connectivity.Ready] > 0 { + return connectivity.Ready + } + if cse.nums[connectivity.TransientFailure] > 1 { + return connectivity.TransientFailure + } + if cse.nums[connectivity.Connecting] > 0 { + return connectivity.Connecting + } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } + if cse.nums[connectivity.Idle] > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} + +// getWeightAttribute is a convenience function which returns the value of the +// weight attribute stored in the BalancerAttributes field of addr, using the +// weightedroundrobin package. +// +// When used in the xDS context, the weight attribute is guaranteed to be +// non-zero. But, when used in a non-xDS context, the weight attribute could be +// unset. A default of 1 is used in the latter case.
+func getWeightAttribute(addr resolver.Address) uint32 { + w := weightedroundrobin.GetAddrInfo(addr).Weight + if w == 0 { + return 1 + } + return w +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go new file mode 100644 index 000000000..92bb3ae5b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import "context" + +type clusterKey struct{} + +func getRequestHash(ctx context.Context) uint64 { + requestHash, _ := ctx.Value(clusterKey{}).(uint64) + return requestHash +} + +// GetRequestHashForTesting returns the request hash in the context; to be used +// for testing only. +func GetRequestHashForTesting(ctx context.Context) uint64 { + return getRequestHash(ctx) +} + +// SetRequestHash adds the request hash to the context for use in Ring Hash Load +// Balancing. +func SetRequestHash(ctx context.Context, requestHash uint64) context.Context { + return context.WithValue(ctx, clusterKey{}, requestHash) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go new file mode 100644 index 000000000..943ee7806 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package wrrlocality provides an implementation of the wrr locality LB policy, +// as defined in [A52 - xDS Custom LB Policies]. +// +// [A52 - xDS Custom LB Policies]: https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md +package wrrlocality + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/grpclog" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +// Name is the name of wrr_locality balancer. 
+const Name = "xds_wrr_locality_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Name() string { + return Name +} + +// LBConfig is the config for the wrr locality balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// To plumb in a different child in tests. +var weightedTargetName = weightedtarget.Name + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(weightedTargetName) + if builder == nil { + // Shouldn't happen, registered through imported weighted target, + // defensive programming. + return nil + } + + // Doesn't need to intercept any balancer.ClientConn operations; pass + // through by just giving cc to child balancer. + wtb := builder.Build(cc, bOpts) + if wtb == nil { + // shouldn't happen, defensive programming. + return nil + } + wtbCfgParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported weighted target builder has this method. + return nil + } + wrrL := &wrrLocalityBalancer{ + child: wtb, + childParser: wtbCfgParser, + } + + wrrL.logger = prefixLogger(wrrL) + wrrL.logger.Infof("Created") + return wrrL +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { + return nil, fmt.Errorf("xds_wrr_locality: invalid LBConfig: %s, error: %v", string(s), err) + } + if lbCfg == nil || lbCfg.ChildPolicy == nil { + return nil, errors.New("xds_wrr_locality: invalid LBConfig: child policy field must be set") + } + return lbCfg, nil +} + +type attributeKey struct{} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o any) bool { + oa, ok := o.(AddrInfo) + return ok && oa.LocalityWeight == a.LocalityWeight +} + +// AddrInfo is the locality weight of the locality an address is a part of. +type AddrInfo struct { + LocalityWeight uint32 +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with AddrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) +} + +// getAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. Returns false if no AddrInfo found. +func getAddrInfo(addr resolver.Address) (AddrInfo, bool) { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, ok := v.(AddrInfo) + return ai, ok +} + +// wrrLocalityBalancer wraps a weighted target balancer, and builds +// configuration for the weighted target once it receives configuration +// specifying the weighted target child balancer and locality weight +// information. +type wrrLocalityBalancer struct { + // child will be a weighted target balancer, and will be built it at + // wrrLocalityBalancer build time. Other than preparing configuration, other + // balancer operations are simply pass through. 
+ child balancer.Balancer + + childParser balancer.ConfigParser + + logger *grpclog.PrefixLogger +} + +func (b *wrrLocalityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("Received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + weightedTargets := make(map[string]weightedtarget.Target) + for _, addr := range s.ResolverState.Addresses { + // This get of LocalityID could potentially return a zero value. This + // shouldn't happen though (this attribute that is set actually gets + // used to build localities in the first place), and thus don't error + // out, and just build a weighted target with undefined behavior. + locality, err := internal.GetLocalityID(addr).ToString() + if err != nil { + // Should never happen. + logger.Errorf("Failed to marshal LocalityID: %v, skipping this locality in weighted target", err) + } + ai, ok := getAddrInfo(addr) + if !ok { + return fmt.Errorf("xds_wrr_locality: missing locality weight information in address %q", addr) + } + weightedTargets[locality] = weightedtarget.Target{Weight: ai.LocalityWeight, ChildPolicy: lbCfg.ChildPolicy} + } + wtCfg := &weightedtarget.LBConfig{Targets: weightedTargets} + wtCfgJSON, err := json.Marshal(wtCfg) + if err != nil { + // Shouldn't happen. + return fmt.Errorf("xds_wrr_locality: error marshalling prepared config: %v", wtCfg) + } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childParser.ParseConfig(wtCfgJSON); err != nil { + return fmt.Errorf("xds_wrr_locality: config generated %v is invalid: %v", wtCfgJSON, err) + } + + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: sc, + }) +} + +func (b *wrrLocalityBalancer) ResolverError(err error) { + b.child.ResolverError(err) +} + +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *wrrLocalityBalancer) Close() { + b.child.Close() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go new file mode 100644 index 000000000..42ccea0a9 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package wrrlocality + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[wrrlocality-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *wrrLocalityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go new file mode 100644 index 000000000..f81e2cab0 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterspecifier contains the ClusterSpecifier interface and a registry for +// storing and retrieving their implementations. +package clusterspecifier + +import ( + "google.golang.org/protobuf/proto" +) + +// BalancerConfig is the Go Native JSON representation of a balancer +// configuration. +type BalancerConfig []map[string]any + +// ClusterSpecifier defines the parsing functionality of a Cluster Specifier. +type ClusterSpecifier interface { + // TypeURLs are the proto message types supported by this + // ClusterSpecifierPlugin. A ClusterSpecifierPlugin will be registered by + // each of its supported message types. + TypeURLs() []string + // ParseClusterSpecifierConfig parses the provided configuration + // proto.Message from the top level RDS configuration. The resulting + // BalancerConfig will be used as configuration for a child LB Policy of the + // Cluster Manager LB Policy. A nil BalancerConfig is invalid. + ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]ClusterSpecifier) +) + +// Register registers the ClusterSpecifierPlugin to the ClusterSpecifier map. +// cs.TypeURLs() will be used as the types for this ClusterSpecifierPlugin. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple cluster specifier +// plugins are registered with the same type URL, the one registered last will +// take effect. +func Register(cs ClusterSpecifier) { + for _, u := range cs.TypeURLs() { + m[u] = cs + } +} + +// Get returns the ClusterSpecifier registered with typeURL. +// +// If no cluster specifier is registered with typeURL, nil will be returned. +func Get(typeURL string) ClusterSpecifier { + return m[typeURL] +} + +// UnregisterForTesting unregisters the ClusterSpecifier for testing purposes. 
+func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go new file mode 100644 index 000000000..89837605c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS cluster specifier plugin. +package rls + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + clusterspecifier.Register(rls{}) + + // TODO: Remove these once the RLS env var is removed. + internal.RegisterRLSClusterSpecifierPluginForTesting = func() { + clusterspecifier.Register(rls{}) + } + internal.UnregisterRLSClusterSpecifierPluginForTesting = func() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } + } +} + +type rls struct{} + +func (rls) TypeURLs() []string { + return []string{"type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier"} +} + +// lbConfigJSON is the RLS LB Policies configuration in JSON format. +// RouteLookupConfig will be a raw JSON string from the passed in proto +// configuration, and the other fields will be hardcoded. 
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage `json:"routeLookupConfig"` + ChildPolicy []map[string]json.RawMessage `json:"childPolicy"` + ChildPolicyConfigTargetFieldName string `json:"childPolicyConfigTargetFieldName"` +} + +func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rls_csp: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) + } + rlcs := new(rlspb.RouteLookupClusterSpecifier) + + if err := m.UnmarshalTo(rlcs); err != nil { + return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) + } + rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling route lookup config: %v: %v", rlcs.GetRouteLookupConfig(), err) + } + lbCfgJSON := &lbConfigJSON{ + RouteLookupConfig: rlcJSON, // "JSON form of RouteLookupClusterSpecifier.config" - RLS in xDS Design Doc + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": json.RawMessage("{}"), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", + } + + rawJSON, err := json.Marshal(lbCfgJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) + } + + rlsBB := balancer.Get(internal.RLSLoadBalancingPolicyName) + if rlsBB == nil { + return nil, fmt.Errorf("RLS LB policy not registered") + } + if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing: %v", err) + } + + return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go new file mode 100644 index 000000000..5a8249059 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fault implements the Envoy Fault Injection HTTP filter. 
+package fault + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "strconv" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + cpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" + fpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" + tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +const headerAbortHTTPStatus = "x-envoy-fault-abort-request" +const headerAbortGRPCStatus = "x-envoy-fault-abort-grpc-request" +const headerAbortPercentage = "x-envoy-fault-abort-request-percentage" + +const headerDelayPercentage = "x-envoy-fault-delay-request-percentage" +const headerDelayDuration = "x-envoy-fault-delay-request" + +var statusMap = map[int]codes.Code{ + 400: codes.Internal, + 401: codes.Unauthenticated, + 403: codes.PermissionDenied, + 404: codes.Unimplemented, + 429: codes.Unavailable, + 502: codes.Unavailable, + 503: codes.Unavailable, + 504: codes.Unavailable, +} + +func init() { + httpfilter.Register(builder{}) +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + config *fpb.HTTPFault +} + +func (builder) TypeURLs() []string { + return []string{"type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault"} +} + +// Parsing is the same for the base config and the override config. +func parseConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("fault: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("fault: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(fpb.HTTPFault) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("fault: error parsing config %v: %v", cfg, err) + } + return config{config: msg}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return parseConfig(cfg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return parseConfig(override) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ httpfilter.ClientInterceptorBuilder = builder{} + +func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("fault: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("fault: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. 
+ c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("fault: incorrect override config type provided (%T): %v", override, override) + } + } + + icfg := c.config + if (icfg.GetMaxActiveFaults() != nil && icfg.GetMaxActiveFaults().GetValue() == 0) || + (icfg.GetDelay() == nil && icfg.GetAbort() == nil) { + return nil, nil + } + return &interceptor{config: icfg}, nil +} + +type interceptor struct { + config *fpb.HTTPFault +} + +var activeFaults uint32 // global active faults; accessed atomically + +func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { + if maxAF := i.config.GetMaxActiveFaults(); maxAF != nil { + defer atomic.AddUint32(&activeFaults, ^uint32(0)) // decrement counter + if af := atomic.AddUint32(&activeFaults, 1); af > maxAF.GetValue() { + // Would exceed maximum active fault limit. + return newStream(ctx, done) + } + } + + if err := injectDelay(ctx, i.config.GetDelay()); err != nil { + return nil, err + } + + if err := injectAbort(ctx, i.config.GetAbort()); err != nil { + if err == errOKStream { + return &okStream{ctx: ctx}, nil + } + return nil, err + } + return newStream(ctx, done) +} + +// For overriding in tests +var randIntn = rand.Intn +var newTimer = time.NewTimer + +func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { + numerator, denominator := splitPct(delayCfg.GetPercentage()) + var delay time.Duration + switch delayType := delayCfg.GetFaultDelaySecifier().(type) { + case *cpb.FaultDelay_FixedDelay: + delay = delayType.FixedDelay.AsDuration() + case *cpb.FaultDelay_HeaderDelay_: + md, _ := metadata.FromOutgoingContext(ctx) + v := md[headerDelayDuration] + if v == nil { + // No delay configured for this RPC. + return nil + } + ms, ok := parseIntFromMD(v) + if !ok { + // Malformed header; no delay. + return nil + } + delay = time.Duration(ms) * time.Millisecond + if v := md[headerDelayPercentage]; v != nil { + if num, ok := parseIntFromMD(v); ok && num < numerator { + numerator = num + } + } + } + if delay == 0 || randIntn(denominator) >= numerator { + return nil + } + t := newTimer(delay) + select { + case <-t.C: + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } + return nil +} + +func injectAbort(ctx context.Context, abortCfg *fpb.FaultAbort) error { + numerator, denominator := splitPct(abortCfg.GetPercentage()) + code := codes.OK + okCode := false + switch errType := abortCfg.GetErrorType().(type) { + case *fpb.FaultAbort_HttpStatus: + code, okCode = grpcFromHTTP(int(errType.HttpStatus)) + case *fpb.FaultAbort_GrpcStatus: + code, okCode = sanitizeGRPCCode(codes.Code(errType.GrpcStatus)), true + case *fpb.FaultAbort_HeaderAbort_: + md, _ := metadata.FromOutgoingContext(ctx) + if v := md[headerAbortHTTPStatus]; v != nil { + // HTTP status has priority over gRPC status. 
+ if httpStatus, ok := parseIntFromMD(v); ok { + code, okCode = grpcFromHTTP(httpStatus) + } + } else if v := md[headerAbortGRPCStatus]; v != nil { + if grpcStatus, ok := parseIntFromMD(v); ok { + code, okCode = sanitizeGRPCCode(codes.Code(grpcStatus)), true + } + } + if v := md[headerAbortPercentage]; v != nil { + if num, ok := parseIntFromMD(v); ok && num < numerator { + numerator = num + } + } + } + if !okCode || randIntn(denominator) >= numerator { + return nil + } + if code == codes.OK { + return errOKStream + } + return status.Errorf(code, "RPC terminated due to fault injection") +} + +var errOKStream = errors.New("stream terminated early with OK status") + +// parseIntFromMD returns the integer in the last header or nil if parsing +// failed. +func parseIntFromMD(header []string) (int, bool) { + if len(header) == 0 { + return 0, false + } + v, err := strconv.Atoi(header[len(header)-1]) + return v, err == nil +} + +func splitPct(fp *tpb.FractionalPercent) (num int, den int) { + if fp == nil { + return 0, 100 + } + num = int(fp.GetNumerator()) + switch fp.GetDenominator() { + case tpb.FractionalPercent_HUNDRED: + return num, 100 + case tpb.FractionalPercent_TEN_THOUSAND: + return num, 10 * 1000 + case tpb.FractionalPercent_MILLION: + return num, 1000 * 1000 + } + return num, 100 +} + +func grpcFromHTTP(httpStatus int) (codes.Code, bool) { + if httpStatus < 200 || httpStatus >= 600 { + // Malformed; ignore this fault type. + return codes.OK, false + } + if c := statusMap[httpStatus]; c != codes.OK { + // OK = 0/the default for the map. + return c, true + } + // All undefined HTTP status codes convert to Unknown. HTTP status of 200 + // is "success", but gRPC converts to Unknown due to missing grpc status. + return codes.Unknown, true +} + +func sanitizeGRPCCode(c codes.Code) codes.Code { + if c > codes.Code(16) { + return codes.Unknown + } + return c +} + +type okStream struct { + ctx context.Context +} + +func (*okStream) Header() (metadata.MD, error) { return nil, nil } +func (*okStream) Trailer() metadata.MD { return nil } +func (*okStream) CloseSend() error { return nil } +func (o *okStream) Context() context.Context { return o.ctx } +func (*okStream) SendMsg(any) error { return io.EOF } +func (*okStream) RecvMsg(any) error { return io.EOF } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go new file mode 100644 index 000000000..4402f0ebf --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package httpfilter contains the HTTPFilter interface and a registry for +// storing and retrieving their implementations. 
+package httpfilter + +import ( + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/protobuf/proto" +) + +// FilterConfig represents an opaque data structure holding configuration for a +// filter. Embed this interface to implement it. +type FilterConfig interface { + isFilterConfig() +} + +// Filter defines the parsing functionality of an HTTP filter. A Filter may +// optionally implement either ClientInterceptorBuilder or +// ServerInterceptorBuilder or both, indicating it is capable of working on the +// client side or server side or both, respectively. +type Filter interface { + // TypeURLs are the proto message types supported by this filter. A filter + // will be registered by each of its supported message types. + TypeURLs() []string + // ParseFilterConfig parses the provided configuration proto.Message from + // the LDS configuration of this filter. This may be an anypb.Any, a + // udpa.type.v1.TypedStruct, or an xds.type.v3.TypedStruct for filters that + // do not accept a custom type. The resulting FilterConfig will later be + // passed to Build. + ParseFilterConfig(proto.Message) (FilterConfig, error) + // ParseFilterConfigOverride parses the provided override configuration + // proto.Message from the RDS override configuration of this filter. This + // may be an anypb.Any, a udpa.type.v1.TypedStruct, or an + // xds.type.v3.TypedStruct for filters that do not accept a custom type. + // The resulting FilterConfig will later be passed to Build. + ParseFilterConfigOverride(proto.Message) (FilterConfig, error) + // IsTerminal returns whether this Filter is terminal or not (i.e. it must + // be last filter in the filter chain). + IsTerminal() bool +} + +// ClientInterceptorBuilder constructs a Client Interceptor. If this type is +// implemented by a Filter, it is capable of working on a client. +type ClientInterceptorBuilder interface { + // BuildClientInterceptor uses the FilterConfigs produced above to produce + // an HTTP filter interceptor for clients. config will always be non-nil, + // but override may be nil if no override config exists for the filter. It + // is valid for Build to return a nil Interceptor and a nil error. In this + // case, the RPC will not be intercepted by this filter. + BuildClientInterceptor(config, override FilterConfig) (iresolver.ClientInterceptor, error) +} + +// ServerInterceptorBuilder constructs a Server Interceptor. If this type is +// implemented by a Filter, it is capable of working on a server. +type ServerInterceptorBuilder interface { + // BuildServerInterceptor uses the FilterConfigs produced above to produce + // an HTTP filter interceptor for servers. config will always be non-nil, + // but override may be nil if no override config exists for the filter. It + // is valid for Build to return a nil Interceptor and a nil error. In this + // case, the RPC will not be intercepted by this filter. + BuildServerInterceptor(config, override FilterConfig) (iresolver.ServerInterceptor, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]Filter) +) + +// Register registers the HTTP filter Builder to the filter map. b.TypeURLs() +// will be used as the types for this filter. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple filters are +// registered with the same type URL, the one registered last will take effect. 
+func Register(b Filter) { + for _, u := range b.TypeURLs() { + m[u] = b + } +} + +// UnregisterForTesting unregisters the HTTP Filter for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} + +// Get returns the HTTPFilter registered with typeURL. +// +// If no filter is registered with typeURL, nil will be returned. +func Get(typeURL string) Filter { + return m[typeURL] +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go new file mode 100644 index 000000000..bcda2ab05 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -0,0 +1,214 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rbac implements the Envoy RBAC HTTP filter. +package rbac + +import ( + "context" + "errors" + "fmt" + "strings" + + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/rbac" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" +) + +func init() { + httpfilter.Register(builder{}) + + // TODO: Remove these once the RBAC env var is removed. + internal.RegisterRBACHTTPFilterForTesting = func() { + httpfilter.Register(builder{}) + } + internal.UnregisterRBACHTTPFilterForTesting = func() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } + } +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + chainEngine *rbac.ChainEngine +} + +func (builder) TypeURLs() []string { + return []string{ + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute", + } +} + +// Parsing is the same for the base config and the override config. +func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { + // All the validation logic described in A41. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + // "Policy.condition and Policy.checked_condition must cause a + // validation failure if present." - A41 + if policy.Condition != nil { + return nil, errors.New("rbac: Policy.condition is present") + } + if policy.CheckedCondition != nil { + return nil, errors.New("rbac: policy.CheckedCondition is present") + } + + // "It is also a validation failure if Permission or Principal has a + // header matcher for a grpc- prefixed header name or :scheme."
- A41 + for _, principal := range policy.Principals { + name := principal.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) + } + } + for _, permission := range policy.Permissions { + name := permission.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) + } + } + } + + // "Envoy aliases :authority and Host in its header map implementation, so + // they should be treated equivalent for the RBAC matchers; there must be no + // behavior change depending on which of the two header names is used in the + // RBAC policy." - A41. Loop through config's principals and policies, change + // any header matcher with value "host" to :authority", as that is what + // grpc-go shifts both headers to in transport layer. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + for _, principal := range policy.Principals { + if principal.GetHeader().GetName() == "host" { + principal.GetHeader().Name = ":authority" + } + } + for _, permission := range policy.Permissions { + if permission.GetHeader().GetName() == "host" { + permission.GetHeader().Name = ":authority" + } + } + } + + // Two cases where this HTTP Filter is a no op: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." - A41 + if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { + return config{}, nil + } + + // TODO(gregorycooke) - change the call chain to here so we have the filter + // name to input here instead of an empty string. It will come from here: + // https://github.com/grpc/grpc-go/blob/eff0942e95d93112921414aee758e619ec86f26f/xds/internal/xdsclient/xdsresource/unmarshal_lds.go#L199 + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") + if err != nil { + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." 
- A41 + if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { + return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) + } + } + + return config{chainEngine: ce}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(rpb.RBAC) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) + } + return parseConfig(msg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + m, ok := override.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override) + } + msg := new(rpb.RBACPerRoute) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) + } + return parseConfig(msg.Rbac) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ httpfilter.ServerInterceptorBuilder = builder{} + +// BuildServerInterceptor is an optional interface builder implements in order +// to signify it works server side. +func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override httpfilter.FilterConfig) (resolver.ServerInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. + c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect override config type provided (%T): %v", override, override) + } + } + + // RBAC HTTP Filter is a no op from one of these two cases: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." - A41 + if c.chainEngine == nil { + return nil, nil + } + return &interceptor{chainEngine: c.chainEngine}, nil +} + +type interceptor struct { + chainEngine *rbac.ChainEngine +} + +func (i *interceptor) AllowRPC(ctx context.Context) error { + return i.chainEngine.IsAuthorized(ctx) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go new file mode 100644 index 000000000..a781523d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package router implements the Envoy Router HTTP filter. +package router + +import ( + "fmt" + + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + pb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" +) + +// TypeURL is the message type for the Router configuration. +const TypeURL = "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + +func init() { + httpfilter.Register(builder{}) +} + +// IsRouterFilter returns true iff a HTTP filter is a Router filter. +func IsRouterFilter(b httpfilter.Filter) bool { + _, ok := b.(builder) + return ok +} + +type builder struct { +} + +func (builder) TypeURLs() []string { return []string{TypeURL} } + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + // The gRPC router filter does not currently use any fields from the + // config. Verify type only. + if cfg == nil { + return nil, fmt.Errorf("router: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("router: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(pb.Router) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("router: error parsing config %v: %v", cfg, err) + } + return config{}, nil +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override != nil { + return nil, fmt.Errorf("router: unexpected config override specified: %v", override) + } + return config{}, nil +} + +func (builder) IsTerminal() bool { + return true +} + +var ( + _ httpfilter.ClientInterceptorBuilder = builder{} + _ httpfilter.ServerInterceptorBuilder = builder{} +) + +func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is implemented within the xds resolver's config + // selector, not as a separate plugin. So we return a nil HTTPFilter, + // which will not be invoked. + return nil, nil +} + +func (builder) BuildServerInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is currently unimplemented on the server side. So we + // return a nil HTTPFilter, which will not be invoked. + return nil, nil +} + +// The gRPC router filter does not currently support any configuration. Verify +// type only. +type config struct { + httpfilter.FilterConfig +} diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go new file mode 100644 index 000000000..1d8a6b03f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functions/structs shared by xds +// balancers/resolvers. +package internal + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/resolver" +) + +// LocalityID is xds.Locality without XXX fields, so it can be used as map +// keys. +// +// xds.Locality cannot be map keys because one of the XXX fields is a slice. +type LocalityID struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` + SubZone string `json:"subZone,omitempty"` +} + +// ToString generates a string representation of LocalityID by marshalling it into +// json. Not calling it String() so printf won't call it. +func (l LocalityID) ToString() (string, error) { + b, err := json.Marshal(l) + if err != nil { + return "", err + } + return string(b), nil +} + +// Equal allows the values to be compared by Attributes.Equal. +func (l LocalityID) Equal(o any) bool { + ol, ok := o.(LocalityID) + if !ok { + return false + } + return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone +} + +// Empty returns whether or not the locality ID is empty. +func (l LocalityID) Empty() bool { + return l.Region == "" && l.Zone == "" && l.SubZone == "" +} + +// LocalityIDFromString converts a json representation of locality, into a +// LocalityID struct. +func LocalityIDFromString(s string) (ret LocalityID, _ error) { + err := json.Unmarshal([]byte(s), &ret) + if err != nil { + return LocalityID{}, fmt.Errorf("%s is not a well formatted locality ID, error: %v", s, err) + } + return ret, nil +} + +type localityKeyType string + +const localityKey = localityKeyType("grpc.xds.internal.address.locality") + +// GetLocalityID returns the locality ID of addr. +func GetLocalityID(addr resolver.Address) LocalityID { + path, _ := addr.BalancerAttributes.Value(localityKey).(LocalityID) + return path +} + +// SetLocalityID sets locality ID in addr to l. +func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) + return addr +} + +// ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. +var ResourceTypeMapForTesting map[string]any + +// UnknownCSMLabels are TelemetryLabels emitted from CDS if CSM Telemetry Label +// data is not present in the CDS Resource. +var UnknownCSMLabels = map[string]string{ + "csm.service_name": "unknown", + "csm.service_namespace_name": "unknown", +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go new file mode 100644 index 000000000..d9c232782 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go @@ -0,0 +1,30 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the xDS resolver. +package internal + +// The following variables are overridden in tests. +var ( + // NewWRR is the function used to create a new weighted round robin + // implementation. + NewWRR any // func() wrr.WRR + + // NewXDSClient is the function used to create a new xDS client. + NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go b/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go new file mode 100644 index 000000000..746b85af2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resolver %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *xdsResolver) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go new file mode 100644 index 000000000..36776f3de --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -0,0 +1,348 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package resolver + +import ( + "context" + "encoding/json" + "fmt" + "math/bits" + "math/rand" + "strings" + "sync/atomic" + "time" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + cdsName = "cds_experimental" + xdsClusterManagerName = "xds_cluster_manager_experimental" + clusterPrefix = "cluster:" + clusterSpecifierPluginPrefix = "cluster_specifier_plugin:" +) + +type serviceConfig struct { + LoadBalancingConfig balancerConfig `json:"loadBalancingConfig"` +} + +type balancerConfig []map[string]any + +func newBalancerConfig(name string, config any) balancerConfig { + return []map[string]any{{name: config}} +} + +type cdsBalancerConfig struct { + Cluster string `json:"cluster"` +} + +type xdsChildConfig struct { + ChildPolicy balancerConfig `json:"childPolicy"` +} + +type xdsClusterManagerConfig struct { + Children map[string]xdsChildConfig `json:"children"` +} + +// serviceConfigJSON produces a service config in JSON format representing all +// the clusters referenced in activeClusters. This includes clusters with zero +// references, so they must be pruned first. +func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { + // Generate children (all entries in activeClusters). + children := make(map[string]xdsChildConfig) + for cluster, ci := range activeClusters { + children[cluster] = ci.cfg + } + + sc := serviceConfig{ + LoadBalancingConfig: newBalancerConfig( + xdsClusterManagerName, xdsClusterManagerConfig{Children: children}, + ), + } + + bs, err := json.Marshal(sc) + if err != nil { + return nil, fmt.Errorf("failed to marshal json: %v", err) + } + return bs, nil +} + +type virtualHost struct { + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig + // retry policy present in virtual host + retryConfig *xdsresource.RetryConfig +} + +// routeCluster holds information about a cluster as referenced by a route. 
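+// Note (explanatory, added for clarity): per-cluster HTTP filter config
+// overrides take precedence over route-level and virtual-host-level
+// overrides when filter interceptors are built (see newInterceptor below).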
+type routeCluster struct { + name string + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig +} + +type route struct { + m *xdsresource.CompositeMatcher // converted from route matchers + actionType xdsresource.RouteActionType // holds route action type + clusters wrr.WRR // holds *routeCluster entries + maxStreamDuration time.Duration + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig + retryConfig *xdsresource.RetryConfig + hashPolicies []*xdsresource.HashPolicy +} + +func (r route) String() string { + return fmt.Sprintf("%s -> { clusters: %v, maxStreamDuration: %v }", r.m.String(), r.clusters, r.maxStreamDuration) +} + +type configSelector struct { + r *xdsResolver + virtualHost virtualHost + routes []route + clusters map[string]*clusterInfo + httpFilterConfig []xdsresource.HTTPFilter +} + +var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") +var errUnsupportedClientRouteAction = status.Errorf(codes.Unavailable, "matched route does not have a supported route action type") + +func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + if cs == nil { + return nil, status.Errorf(codes.Unavailable, "no valid clusters") + } + var rt *route + // Loop through routes in order and select first match. + for _, r := range cs.routes { + if r.m.Match(rpcInfo) { + rt = &r + break + } + } + + if rt == nil || rt.clusters == nil { + return nil, errNoMatchedRouteFound + } + + if rt.actionType != xdsresource.RouteActionRoute { + return nil, errUnsupportedClientRouteAction + } + + cluster, ok := rt.clusters.Next().(*routeCluster) + if !ok { + return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) + } + + // Add a ref to the selected cluster, as this RPC needs this cluster until + // it is committed. + ref := &cs.clusters[cluster.name].refCount + atomic.AddInt32(ref, 1) + + interceptor, err := cs.newInterceptor(rt, cluster) + if err != nil { + return nil, err + } + + lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) + lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) + + config := &iresolver.RPCConfig{ + // Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy. + Context: lbCtx, + OnCommitted: func() { + // When the RPC is committed, the cluster is no longer required. + // Decrease its ref. + if v := atomic.AddInt32(ref, -1); v == 0 { + // This entry will be removed from activeClusters when + // producing the service config for the empty update. 
+ cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.onClusterRefDownToZero() + }) + } + }, + Interceptor: interceptor, + } + + if rt.maxStreamDuration != 0 { + config.MethodConfig.Timeout = &rt.maxStreamDuration + } + if rt.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig) + } else if cs.virtualHost.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig) + } + + return config, nil +} + +func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy { + return &serviceconfig.RetryPolicy{ + MaxAttempts: int(config.NumRetries) + 1, + InitialBackoff: config.RetryBackoff.BaseInterval, + MaxBackoff: config.RetryBackoff.MaxInterval, + BackoffMultiplier: 2, + RetryableStatusCodes: config.RetryOn, + } +} + +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { + var hash uint64 + var generatedHash bool + var md, emd metadata.MD + var mdRead bool + for _, policy := range hashPolicies { + var policyHash uint64 + var generatedPolicyHash bool + switch policy.HashPolicyType { + case xdsresource.HashPolicyTypeHeader: + if strings.HasSuffix(policy.HeaderName, "-bin") { + continue + } + if !mdRead { + md, _ = metadata.FromOutgoingContext(rpcInfo.Context) + emd, _ = grpcutil.ExtraMetadata(rpcInfo.Context) + mdRead = true + } + values := emd.Get(policy.HeaderName) + if len(values) == 0 { + // Extra metadata (e.g. the "content-type" header) takes + // precedence over the user's metadata. + values = md.Get(policy.HeaderName) + if len(values) == 0 { + // If the header isn't present at all, this policy is a no-op. + continue + } + } + joinedValues := strings.Join(values, ",") + if policy.Regex != nil { + joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution) + } + policyHash = xxhash.Sum64String(joinedValues) + generatedHash = true + generatedPolicyHash = true + case xdsresource.HashPolicyTypeChannelID: + // Use the static channel ID as the hash for this policy. + policyHash = cs.r.channelID + generatedHash = true + generatedPolicyHash = true + } + + // Deterministically combine the hash policies. Rotating prevents + // duplicate hash policies from cancelling each other out and preserves + // the 64 bits of entropy. + if generatedPolicyHash { + hash = bits.RotateLeft64(hash, 1) + hash = hash ^ policyHash + } + + // If terminal policy and a hash has already been generated, ignore the + // rest of the policies and use that hash already generated. + if policy.Terminal && generatedHash { + break + } + } + + if generatedHash { + return hash + } + // If no generated hash return a random long. In the grand scheme of things + // this logically will map to choosing a random backend to route request to. 
+ return rand.Uint64() +} + +func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { + if len(cs.httpFilterConfig) == 0 { + return nil, nil + } + interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig)) + for _, filter := range cs.httpFilterConfig { + override := cluster.httpFilterConfigOverride[filter.Name] // cluster is highest priority + if override == nil { + override = rt.httpFilterConfigOverride[filter.Name] // route is second priority + } + if override == nil { + override = cs.virtualHost.httpFilterConfigOverride[filter.Name] // VH is third & lowest priority + } + ib, ok := filter.Filter.(httpfilter.ClientInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return nil, fmt.Errorf("filter does not support use in client") + } + i, err := ib.BuildClientInterceptor(filter.Config, override) + if err != nil { + return nil, fmt.Errorf("error constructing filter: %v", err) + } + if i != nil { + interceptors = append(interceptors, i) + } + } + return &interceptorList{interceptors: interceptors}, nil +} + +// stop decrements refs of all clusters referenced by this config selector. +func (cs *configSelector) stop() { + // The resolver's old configSelector may be nil. Handle that here. + if cs == nil { + return + } + // If any refs drop to zero, we'll need a service config update to delete + // the cluster. + needUpdate := false + // Loops over cs.clusters, but these are pointers to entries in + // activeClusters. + for _, ci := range cs.clusters { + if v := atomic.AddInt32(&ci.refCount, -1); v == 0 { + needUpdate = true + } + } + // We stop the old config selector immediately after sending a new config + // selector; we need another update to delete clusters from the config (if + // we don't have another update pending already). + if needUpdate { + cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.onClusterRefDownToZero() + }) + } +} + +type interceptorList struct { + interceptors []iresolver.ClientInterceptor +} + +func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, _ func(), newStream func(ctx context.Context, _ func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { + for i := len(il.interceptors) - 1; i >= 0; i-- { + ns := newStream + interceptor := il.interceptors[i] + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return interceptor.NewStream(ctx, ri, done, ns) + } + } + return newStream(ctx, func() {}) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go new file mode 100644 index 000000000..0de660448 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package resolver + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type listenerWatcher struct { + resourceName string + cancel func() + parent *xdsResolver +} + +func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatcher { + lw := &listenerWatcher{resourceName: resourceName, parent: parent} + lw.cancel = xdsresource.WatchListener(parent.xdsClient, resourceName, lw) + return lw +} + +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone() } + l.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (l *listenerWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone() } + l.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone() } + l.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +func (l *listenerWatcher) stop() { + l.cancel() + l.parent.logger.Infof("Canceling watch on Listener resource %q", l.resourceName) +} + +type routeConfigWatcher struct { + resourceName string + cancel func() + parent *xdsResolver +} + +func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfigWatcher { + rw := &routeConfigWatcher{resourceName: resourceName, parent: parent} + rw.cancel = xdsresource.WatchRouteConfig(parent.xdsClient, resourceName, rw) + return rw +} + +func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource) + onDone() + } + r.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { r.parent.onRouteConfigResourceError(r.resourceName, err); onDone() } + r.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone() } + r.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +func (r *routeConfigWatcher) stop() { + r.cancel() + r.parent.logger.Infof("Canceling watch on RouteConfiguration resource %q", r.resourceName) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go new file mode 100644 index 000000000..b5d24e4bf --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -0,0 +1,570 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver implements the xds resolver, that does LDS and RDS to find +// the cluster to use. +package resolver + +import ( + "context" + "fmt" + "math/rand" + "sync/atomic" + + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + rinternal "google.golang.org/grpc/xds/internal/resolver/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Scheme is the xDS resolver's scheme. +// +// TODO(easwars): Rename this package as xdsresolver so that this is accessed as +// xdsresolver.Scheme +const Scheme = "xds" + +// newBuilderForTesting creates a new xds resolver builder using a specific xds +// bootstrap config, so tests can use multiple xds clients in different +// ClientConns at the same time. +func newBuilderForTesting(config []byte) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) + }, + }, nil +} + +func init() { + resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting + + rinternal.NewWRR = wrr.NewRandom + rinternal.NewXDSClient = xdsclient.New +} + +type xdsResolverBuilder struct { + newXDSClient func(string) (xdsclient.XDSClient, func(), error) +} + +// Build helps implement the resolver.Builder interface. +// +// The xds bootstrap process is performed (and a new xds client is built) every +// time an xds resolver is built. +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { + r := &xdsResolver{ + cc: cc, + activeClusters: make(map[string]*clusterInfo), + channelID: rand.Uint64(), + } + defer func() { + if retErr != nil { + r.Close() + } + }() + r.logger = prefixLogger(r) + r.logger.Infof("Creating resolver for target: %+v", target) + + // Initialize the serializer used to synchronize the following: + // - updates from the xDS client. This could lead to generation of new + // service config if resolution is complete. + // - completion of an RPC to a removed cluster causing the associated ref + // count to become zero, resulting in generation of new service config. + // - stopping of a config selector that results in generation of new service + // config. + ctx, cancel := context.WithCancel(context.Background()) + r.serializer = grpcsync.NewCallbackSerializer(ctx) + r.serializerCancel = cancel + + // Initialize the xDS client. + newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error)) + if b.newXDSClient != nil { + newXDSClient = b.newXDSClient + } + client, closeFn, err := newXDSClient(target.String()) + if err != nil { + return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) + } + r.xdsClient = client + r.xdsClientClose = closeFn + + // Determine the listener resource name and start a watcher for it. 
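+	// For example (names illustrative only): with a listener resource name
+	// template of "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s"
+	// and a dial target of "xds:///myservice", the resolver ends up watching the
+	// resource "xdstp://xds.example.com/envoy.config.listener.v3.Listener/myservice".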
+ template, err := r.sanityChecksOnBootstrapConfig(target, opts, r.xdsClient) + if err != nil { + return nil, err + } + r.dataplaneAuthority = opts.Authority + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, target.Endpoint()) + r.listenerWatcher = newListenerWatcher(r.ldsResourceName, r) + return r, nil +} + +// Performs the following sanity checks: +// - Verifies that the bootstrap configuration is not empty. +// - Verifies that if xDS credentials are specified by the user, the +// bootstrap configuration contains certificate providers. +// - Verifies that if the provided dial target contains an authority, the +// bootstrap configuration contains server config for that authority. +// +// Returns the listener resource name template to use. If any of the above +// validations fail, a non-nil error is returned. +func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, _ resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { + bootstrapConfig := client.BootstrapConfig() + if bootstrapConfig == nil { + // This is never expected to happen after a successful xDS client + // creation. Defensive programming. + return "", fmt.Errorf("xds: bootstrap configuration is empty") + } + + // Find the client listener template to use from the bootstrap config: + // - If authority is not set in the target, use the top level template + // - If authority is set, use the template from the authority map. + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate() + if authority := target.URL.Host; authority != "" { + authorities := bootstrapConfig.Authorities() + if authorities == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + a := authorities[authority] + if a == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + if a.ClientListenerResourceNameTemplate != "" { + // This check will never be false, because + // ClientListenerResourceNameTemplate is required to start with + // xdstp://, and has a default value (not an empty string) if unset. + template = a.ClientListenerResourceNameTemplate + } + } + return template, nil +} + +// Name helps implement the resolver.Builder interface. +func (*xdsResolverBuilder) Scheme() string { + return Scheme +} + +// xdsResolver implements the resolver.Resolver interface. +// +// It registers a watcher for ServiceConfig updates with the xdsClient object +// (which performs LDS/RDS queries for the same), and passes the received +// updates to the ClientConn. +type xdsResolver struct { + cc resolver.ClientConn + logger *grpclog.PrefixLogger + // The underlying xdsClient which performs all xDS requests and responses. + xdsClient xdsclient.XDSClient + xdsClientClose func() + // A random number which uniquely identifies the channel which owns this + // resolver. + channelID uint64 + + // All methods on the xdsResolver type except for the ones invoked by gRPC, + // i.e ResolveNow() and Close(), are guaranteed to execute in the context of + // this serializer's callback. And since the serializer guarantees mutual + // exclusion among these callbacks, we can get by without any mutexes to + // access all of the below defined state. The only exception is Close(), + // which does access some of this shared state, but it does so after + // cancelling the context passed to the serializer. 
+ serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // dataplaneAuthority is the authority used for the data plane connections, + // which is also used to select the VirtualHost within the xDS + // RouteConfiguration. This is %-encoded to match with VirtualHost Domain + // in xDS RouteConfiguration. + dataplaneAuthority string + + ldsResourceName string + listenerWatcher *listenerWatcher + listenerUpdateRecvd bool + currentListener xdsresource.ListenerUpdate + + rdsResourceName string + routeConfigWatcher *routeConfigWatcher + routeConfigUpdateRecvd bool + currentRouteConfig xdsresource.RouteConfigUpdate + currentVirtualHost *xdsresource.VirtualHost // Matched virtual host for quick access. + + // activeClusters is a map from cluster name to information about the + // cluster that includes a ref count and load balancing configuration. + activeClusters map[string]*clusterInfo + + curConfigSelector *configSelector +} + +// ResolveNow is a no-op at this point. +func (*xdsResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (r *xdsResolver) Close() { + // Cancel the context passed to the serializer and wait for any scheduled + // callbacks to complete. Canceling the context ensures that no new + // callbacks will be scheduled. + r.serializerCancel() + <-r.serializer.Done() + + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. + + if r.listenerWatcher != nil { + r.listenerWatcher.stop() + } + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + } + if r.xdsClientClose != nil { + r.xdsClientClose() + } + r.logger.Infof("Shutdown") +} + +// sendNewServiceConfig prunes active clusters, generates a new service config +// based on the current set of active clusters, and sends an update to the +// channel with that service config and the provided config selector. Returns +// false if an error occurs while generating the service config and the update +// cannot be sent. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { + // Delete entries from r.activeClusters with zero references; + // otherwise serviceConfigJSON will generate a config including + // them. + r.pruneActiveClusters() + + if cs == nil && len(r.activeClusters) == 0 { + // There are no clusters and we are sending a failing configSelector. + // Send an empty config, which picks pick-first, with no address, and + // puts the ClientConn into transient failure. + r.cc.UpdateState(resolver.State{ServiceConfig: r.cc.ParseServiceConfig("{}")}) + return true + } + + sc, err := serviceConfigJSON(r.activeClusters) + if err != nil { + // JSON marshal error; should never happen. + r.logger.Errorf("For Listener resource %q and RouteConfiguration resource %q, failed to marshal newly built service config: %v", r.ldsResourceName, r.rdsResourceName, err) + r.cc.ReportError(err) + return false + } + r.logger.Infof("For Listener resource %q and RouteConfiguration resource %q, generated service config: %v", r.ldsResourceName, r.rdsResourceName, pretty.FormatJSON(sc)) + + // Send the update to the ClientConn. 
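+	// For a single active cluster named "foo" (illustrative only), the
+	// generated service config looks roughly like:
+	//   {"loadBalancingConfig": [{"xds_cluster_manager_experimental": {"children": {
+	//     "cluster:foo": {"childPolicy": [{"cds_experimental": {"cluster": "foo"}}]}}}]}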
+ state := iresolver.SetConfigSelector(resolver.State{ + ServiceConfig: r.cc.ParseServiceConfig(string(sc)), + }, cs) + r.cc.UpdateState(xdsclient.SetClient(state, r.xdsClient)) + return true +} + +// newConfigSelector creates a new config selector using the most recently +// received listener and route config updates. May add entries to +// r.activeClusters for previously-unseen clusters. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) newConfigSelector() (*configSelector, error) { + cs := &configSelector{ + r: r, + virtualHost: virtualHost{ + httpFilterConfigOverride: r.currentVirtualHost.HTTPFilterConfigOverride, + retryConfig: r.currentVirtualHost.RetryConfig, + }, + routes: make([]route, len(r.currentVirtualHost.Routes)), + clusters: make(map[string]*clusterInfo), + httpFilterConfig: r.currentListener.HTTPFilters, + } + + for i, rt := range r.currentVirtualHost.Routes { + clusters := rinternal.NewWRR.(func() wrr.WRR)() + if rt.ClusterSpecifierPlugin != "" { + clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin + clusters.Add(&routeCluster{ + name: clusterName, + }, 1) + ci := r.addOrGetActiveClusterInfo(clusterName) + ci.cfg = xdsChildConfig{ChildPolicy: balancerConfig(r.currentRouteConfig.ClusterSpecifierPlugins[rt.ClusterSpecifierPlugin])} + cs.clusters[clusterName] = ci + } else { + for cluster, wc := range rt.WeightedClusters { + clusterName := clusterPrefix + cluster + clusters.Add(&routeCluster{ + name: clusterName, + httpFilterConfigOverride: wc.HTTPFilterConfigOverride, + }, int64(wc.Weight)) + ci := r.addOrGetActiveClusterInfo(clusterName) + ci.cfg = xdsChildConfig{ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster})} + cs.clusters[clusterName] = ci + } + } + cs.routes[i].clusters = clusters + + var err error + cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) + if err != nil { + return nil, err + } + cs.routes[i].actionType = rt.ActionType + if rt.MaxStreamDuration == nil { + cs.routes[i].maxStreamDuration = r.currentListener.MaxStreamDuration + } else { + cs.routes[i].maxStreamDuration = *rt.MaxStreamDuration + } + + cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].retryConfig = rt.RetryConfig + cs.routes[i].hashPolicies = rt.HashPolicies + } + + // Account for this config selector's clusters. Do this after no further + // errors may occur. Note: cs.clusters are pointers to entries in + // activeClusters. + for _, ci := range cs.clusters { + atomic.AddInt32(&ci.refCount, 1) + } + + return cs, nil +} + +// pruneActiveClusters deletes entries in r.activeClusters with zero +// references. +func (r *xdsResolver) pruneActiveClusters() { + for cluster, ci := range r.activeClusters { + if atomic.LoadInt32(&ci.refCount) == 0 { + delete(r.activeClusters, cluster) + } + } +} + +func (r *xdsResolver) addOrGetActiveClusterInfo(name string) *clusterInfo { + ci := r.activeClusters[name] + if ci != nil { + return ci + } + + ci = &clusterInfo{refCount: 0} + r.activeClusters[name] = ci + return ci +} + +type clusterInfo struct { + // number of references to this cluster; accessed atomically + refCount int32 + // cfg is the child configuration for this cluster, containing either the + // csp config or the cds cluster config. 
+ cfg xdsChildConfig +} + +// Determines if the xdsResolver has received all required configuration, i.e +// Listener and RouteConfiguration resources, from the management server, and +// whether a matching virtual host was found in the RouteConfiguration resource. +func (r *xdsResolver) resolutionComplete() bool { + return r.listenerUpdateRecvd && r.routeConfigUpdateRecvd && r.currentVirtualHost != nil +} + +// onResolutionComplete performs the following actions when resolution is +// complete, i.e Listener and RouteConfiguration resources have been received +// from the management server and a matching virtual host is found in the +// latter. +// - creates a new config selector (this involves incrementing references to +// clusters owned by this config selector). +// - stops the old config selector (this involves decrementing references to +// clusters owned by this config selector). +// - prunes active clusters and pushes a new service config to the channel. +// - updates the current config selector used by the resolver. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onResolutionComplete() { + if !r.resolutionComplete() { + return + } + + cs, err := r.newConfigSelector() + if err != nil { + r.logger.Warningf("Failed to build a config selector for resource %q: %v", r.ldsResourceName, err) + r.cc.ReportError(err) + return + } + + if !r.sendNewServiceConfig(cs) { + // JSON error creating the service config (unexpected); erase + // this config selector and ignore this update, continuing with + // the previous config selector. + cs.stop() + return + } + + r.curConfigSelector.stop() + r.curConfigSelector = cs +} + +func (r *xdsResolver) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) { + matchVh := xdsresource.FindBestMatchingVirtualHost(r.dataplaneAuthority, update.VirtualHosts) + if matchVh == nil { + r.onError(fmt.Errorf("no matching virtual host found for %q", r.dataplaneAuthority)) + return + } + r.currentRouteConfig = update + r.currentVirtualHost = matchVh + r.routeConfigUpdateRecvd = true + + r.onResolutionComplete() +} + +// onError propagates the error up to the channel. And since this is invoked +// only for non resource-not-found errors, we don't have to update resolver +// state and we can keep using the old config. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onError(err error) { + r.cc.ReportError(err) +} + +// Contains common functionality to be executed when resources of either type +// are removed. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onResourceNotFound() { + // We cannot remove clusters from the service config that have ongoing RPCs. + // Instead, what we can do is to send an erroring (nil) config selector + // along with normal service config. This will ensure that new RPCs will + // fail, and once the active RPCs complete, the reference counts on the + // clusters will come down to zero. At that point, we will send an empty + // service config with no addresses. This results in the pick-first + // LB policy being configured on the channel, and since there are no + // address, pick-first will put the channel in TRANSIENT_FAILURE. + r.sendNewServiceConfig(nil) + + // Stop and dereference the active config selector, if one exists. + r.curConfigSelector.stop() + r.curConfigSelector = nil +} + +// Only executed in the context of a serializer callback. 
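+// If the Listener update carries an inline RouteConfiguration, any existing
+// RDS watch is cancelled and the inline configuration is applied directly;
+// otherwise an RDS watch is started (or restarted) for the route
+// configuration name carried in the update.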
+func (r *xdsResolver) onListenerResourceUpdate(update xdsresource.ListenerUpdate) { + if r.logger.V(2) { + r.logger.Infof("Received update for Listener resource %q: %v", r.ldsResourceName, pretty.ToJSON(update)) + } + + r.currentListener = update + r.listenerUpdateRecvd = true + + if update.InlineRouteConfig != nil { + // If there was a previous route config watcher because of a non-inline + // route configuration, cancel it. + r.rdsResourceName = "" + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + r.routeConfigWatcher = nil + } + + r.applyRouteConfigUpdate(*update.InlineRouteConfig) + return + } + + // We get here only if there was no inline route configuration. + + // If the route config name has not changed, send an update with existing + // route configuration and the newly received listener configuration. + if r.rdsResourceName == update.RouteConfigName { + r.onResolutionComplete() + return + } + + // If the route config name has changed, cancel the old watcher and start a + // new one. At this point, since we have not yet resolved the new route + // config name, we don't send an update to the channel, and therefore + // continue using the old route configuration (if received) until the new + // one is received. + r.rdsResourceName = update.RouteConfigName + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + r.currentVirtualHost = nil + r.routeConfigUpdateRecvd = false + } + r.routeConfigWatcher = newRouteConfigWatcher(r.rdsResourceName, r) +} + +func (r *xdsResolver) onListenerResourceError(err error) { + if r.logger.V(2) { + r.logger.Infof("Received error for Listener resource %q: %v", r.ldsResourceName, err) + } + r.onError(err) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onListenerResourceNotFound() { + if r.logger.V(2) { + r.logger.Infof("Received resource-not-found-error for Listener resource %q", r.ldsResourceName) + } + + r.listenerUpdateRecvd = false + + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + } + r.rdsResourceName = "" + r.currentVirtualHost = nil + r.routeConfigUpdateRecvd = false + r.routeConfigWatcher = nil + + r.onResourceNotFound() +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceUpdate(name string, update xdsresource.RouteConfigUpdate) { + if r.logger.V(2) { + r.logger.Infof("Received update for RouteConfiguration resource %q: %v", name, pretty.ToJSON(update)) + } + + if r.rdsResourceName != name { + // Drop updates from canceled watchers. + return + } + + r.applyRouteConfigUpdate(update) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceError(name string, err error) { + if r.logger.V(2) { + r.logger.Infof("Received error for RouteConfiguration resource %q: %v", name, err) + } + r.onError(err) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceNotFound(name string) { + if r.logger.V(2) { + r.logger.Infof("Received resource-not-found-error for RouteConfiguration resource %q", name) + } + + if r.rdsResourceName != name { + return + } + r.onResourceNotFound() +} + +// Only executed in the context of a serializer callback. 
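+// It is scheduled when a committed RPC drops the last reference to a cluster;
+// re-sending the service config allows the now-unreferenced cluster to be
+// pruned from the generated configuration.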
+func (r *xdsResolver) onClusterRefDownToZero() { + r.sendNewServiceConfig(r.curConfigSelector) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go new file mode 100644 index 000000000..92d07e7fb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -0,0 +1,189 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// connWrapper is a thin wrapper around a net.Conn returned by Accept(). It +// provides the following additional functionality: +// 1. A way to retrieve the configured deadline. This is required by the +// ServerHandshake() method of the xdsCredentials when it attempts to read +// key material from the certificate providers. +// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to +// retrieve the configured certificate providers. +// 3. xDS filter_chain configuration determines security configuration. +// 4. Dynamically reads routing configuration in UsableRouteConfiguration(), called +// to process incoming RPC's. (LDS + RDS configuration). +type connWrapper struct { + net.Conn + + // The specific filter chain picked for handling this connection. + filterChain *xdsresource.FilterChain + + // A reference to the listenerWrapper on which this connection was accepted. + parent *listenerWrapper + + // The certificate providers created for this connection. + rootProvider, identityProvider certprovider.Provider + + // The connection deadline as configured by the grpc.Server on the rawConn + // that is returned by a call to Accept(). This is set to the connection + // timeout value configured by the user (or to a default value) before + // initiating the transport credential handshake, and set to zero after + // completing the HTTP2 handshake. + deadlineMu sync.Mutex + deadline time.Time + + mu sync.Mutex + st transport.ServerTransport + draining bool + + // The virtual hosts with matchable routes and instantiated HTTP Filters per + // route, or an error. + urc *atomic.Pointer[xdsresource.UsableRouteConfiguration] +} + +// UsableRouteConfiguration returns the UsableRouteConfiguration to be used for +// server side routing. +func (c *connWrapper) UsableRouteConfiguration() xdsresource.UsableRouteConfiguration { + return *c.urc.Load() +} + +// SetDeadline makes a copy of the passed in deadline and forwards the call to +// the underlying rawConn. +func (c *connWrapper) SetDeadline(t time.Time) error { + c.deadlineMu.Lock() + c.deadline = t + c.deadlineMu.Unlock() + return c.Conn.SetDeadline(t) +} + +// GetDeadline returns the configured deadline. 
This will be invoked by the +// ServerHandshake() method of the XdsCredentials, which needs a deadline to +// pass to the certificate provider. +func (c *connWrapper) GetDeadline() time.Time { + c.deadlineMu.Lock() + t := c.deadline + c.deadlineMu.Unlock() + return t +} + +// XDSHandshakeInfo returns a HandshakeInfo with appropriate security +// configuration for this connection. This method is invoked by the +// ServerHandshake() method of the XdsCredentials. +func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { + if c.filterChain.SecurityCfg == nil { + // If the security config is empty, this means that the control plane + // did not provide any security configuration and therefore we should + // return an empty HandshakeInfo here so that the xdsCreds can use the + // configured fallback credentials. + return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil + } + + cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() + // Identity provider name is mandatory on the server-side, and this is + // enforced when the resource is received at the XDSClient layer. + secCfg := c.filterChain.SecurityCfg + ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) + if err != nil { + return nil, err + } + // Root provider name is optional and required only when doing mTLS. + var rp certprovider.Provider + if instance, cert := secCfg.RootInstanceName, secCfg.RootCertName; instance != "" { + rp, err = buildProviderFunc(cpc, instance, cert, false, true) + if err != nil { + return nil, err + } + } + c.identityProvider = ip + c.rootProvider = rp + + return xdsinternal.NewHandshakeInfo(c.rootProvider, c.identityProvider, nil, secCfg.RequireClientCert), nil +} + +// PassServerTransport drains the passed in ServerTransport if draining is set, +// or persists it to be drained once drained is called. +func (c *connWrapper) PassServerTransport(st transport.ServerTransport) { + c.mu.Lock() + defer c.mu.Unlock() + if c.draining { + st.Drain("draining") + } else { + c.st = st + } +} + +// Drain drains the associated ServerTransport, or sets draining to true so it +// will be drained after it is created. +func (c *connWrapper) Drain() { + c.mu.Lock() + defer c.mu.Unlock() + if c.st == nil { + c.draining = true + } else { + c.st.Drain("draining") + } +} + +// Close closes the providers and the underlying connection. +func (c *connWrapper) Close() error { + if c.identityProvider != nil { + c.identityProvider.Close() + } + if c.rootProvider != nil { + c.rootProvider.Close() + } + c.parent.removeConn(c) + return c.Conn.Close() +} + +func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanceName, certName string, wantIdentity, wantRoot bool) (certprovider.Provider, error) { + cfg, ok := configs[instanceName] + if !ok { + // Defensive programming. If a resource received from the management + // server contains a certificate provider instance name that is not + // found in the bootstrap, the resource is NACKed by the xDS client. + return nil, fmt.Errorf("certificate provider instance %q not found in bootstrap file", instanceName) + } + provider, err := cfg.Build(certprovider.BuildOptions{ + CertName: certName, + WantIdentity: wantIdentity, + WantRoot: wantRoot, + }) + if err != nil { + // This error is not expected since the bootstrap process parses the + // config and makes sure that it is acceptable to the plugin. 
Still, it + // is possible that the plugin parses the config successfully, but its + // Build() method errors out. + return nil, fmt.Errorf("failed to get security plugin instance (%+v): %v", cfg, err) + } + return provider, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go new file mode 100644 index 000000000..09d320018 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package server contains internal server-side functionality used by the public +// facing xds package. +package server + +import ( + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + internalbackoff "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +var ( + logger = grpclog.Component("xds") + + // Backoff strategy for temporary errors received from Accept(). If this + // needs to be configurable, we can inject it through ListenerWrapperParams. + bs = internalbackoff.Exponential{Config: backoff.Config{ + BaseDelay: 5 * time.Millisecond, + Multiplier: 2.0, + MaxDelay: 1 * time.Second, + }} + backoffFunc = bs.Backoff +) + +// ServingModeCallback is the callback that users can register to get notified +// about the server's serving mode changes. The callback is invoked with the +// address of the listener and its new mode. The err parameter is set to a +// non-nil error if the server has transitioned into not-serving mode. +type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err error) + +// XDSClient wraps the methods on the XDSClient which are required by +// the listenerWrapper. +type XDSClient interface { + WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + BootstrapConfig() *bootstrap.Config +} + +// ListenerWrapperParams wraps parameters required to create a listenerWrapper. +type ListenerWrapperParams struct { + // Listener is the net.Listener passed by the user that is to be wrapped. + Listener net.Listener + // ListenerResourceName is the xDS Listener resource to request. + ListenerResourceName string + // XDSClient provides the functionality from the XDSClient required here. + XDSClient XDSClient + // ModeCallback is the callback to invoke when the serving mode changes. + ModeCallback ServingModeCallback +} + +// NewListenerWrapper creates a new listenerWrapper with params. It returns a +// net.Listener. It starts in state not serving, which triggers Accept() + +// Close() on any incoming connections. +// +// Only TCP listeners are supported. 
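+//
+// A minimal usage sketch (the resource name, xDS client, and server wiring
+// below are assumptions made for illustration, not values this package
+// mandates):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		// handle error
+//	}
+//	lw := NewListenerWrapper(ListenerWrapperParams{
+//		Listener:             lis,
+//		ListenerResourceName: "example-listener-resource-name",
+//		XDSClient:            xdsC, // an xDS client created elsewhere
+//		ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) {
+//			logger.Infof("Serving mode for listener %q changed to %q", addr.String(), mode)
+//		},
+//	})
+//	defer lw.Close()
+//	// lw is then handed to grpc.Server.Serve in place of the raw listener.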
+func NewListenerWrapper(params ListenerWrapperParams) net.Listener { + lw := &listenerWrapper{ + Listener: params.Listener, + name: params.ListenerResourceName, + xdsC: params.XDSClient, + modeCallback: params.ModeCallback, + isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), + conns: make(map[*connWrapper]bool), + + mode: connectivity.ServingModeNotServing, + closed: grpcsync.NewEvent(), + } + lw.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", lw)) + + // Serve() verifies that Addr() returns a valid TCPAddr. So, it is safe to + // ignore the error from SplitHostPort(). + lisAddr := lw.Listener.Addr().String() + lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + + lw.rdsHandler = newRDSHandler(lw.handleRDSUpdate, lw.xdsC, lw.logger) + lw.cancelWatch = xdsresource.WatchListener(lw.xdsC, lw.name, &ldsWatcher{ + parent: lw, + logger: lw.logger, + name: lw.name, + }) + return lw +} + +// listenerWrapper wraps the net.Listener associated with the listening address +// passed to Serve(). It also contains all other state associated with this +// particular invocation of Serve(). +type listenerWrapper struct { + net.Listener + logger *internalgrpclog.PrefixLogger + + name string + xdsC XDSClient + cancelWatch func() + modeCallback ServingModeCallback + + // Set to true if the listener is bound to the IP_ANY address (which is + // "0.0.0.0" for IPv4 and "::" for IPv6). + isUnspecifiedAddr bool + // Listening address and port. Used to validate the socket address in the + // Listener resource received from the control plane. + addr, port string + + // A small race exists in the XDSClient code between the receipt of an xDS + // response and the user cancelling the associated watch. In this window, + // the registered callback may be invoked after the watch is canceled, and + // the user is expected to work around this. This event signifies that the + // listener is closed (and hence the watch is cancelled), and we drop any + // updates received in the callback if this event has fired. + closed *grpcsync.Event + + // mu guards access to the current serving mode and the active filter chain + // manager. + mu sync.Mutex + // Current serving mode. + mode connectivity.ServingMode + // Filter chain manager currently serving. + activeFilterChainManager *xdsresource.FilterChainManager + // conns accepted with configuration from activeFilterChainManager. + conns map[*connWrapper]bool + + // These fields are read/written to in the context of xDS updates, which are + // guaranteed to be emitted synchronously from the xDS Client. Thus, they do + // not need further synchronization. + + // Pending filter chain manager. Will go active once rdsHandler has received + // all the RDS resources this filter chain manager needs. + pendingFilterChainManager *xdsresource.FilterChainManager + + // rdsHandler is used for any dynamic RDS resources specified in a LDS + // update. + rdsHandler *rdsHandler +} + +func (l *listenerWrapper) handleLDSUpdate(update xdsresource.ListenerUpdate) { + ilc := update.InboundListenerCfg + // Make sure that the socket address on the received Listener resource + // matches the address of the net.Listener passed to us by the user. This + // check is done here instead of at the XDSClient layer because of the + // following couple of reasons: + // - XDSClient cannot know the listening address of every listener in the + // system, and hence cannot perform this check. 
+ // - this is a very context-dependent check and only the server has the + // appropriate context to perform this check. + // + // What this means is that the XDSClient has ACKed a resource which can push + // the server into a "not serving" mode. This is not ideal, but this is + // what we have decided to do. + if ilc.Address != l.addr || ilc.Port != l.port { + l.mu.Lock() + l.switchModeLocked(connectivity.ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) + l.mu.Unlock() + return + } + + l.pendingFilterChainManager = ilc.FilterChains + l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) + + if l.rdsHandler.determineRouteConfigurationReady() { + l.maybeUpdateFilterChains() + } +} + +// maybeUpdateFilterChains swaps in the pending filter chain manager to the +// active one if the pending filter chain manager is present. If a swap occurs, +// it also drains (gracefully stops) any connections that were accepted on the +// old active filter chain manager, and puts this listener in state SERVING. +// Must be called within an xDS Client Callback. +func (l *listenerWrapper) maybeUpdateFilterChains() { + if l.pendingFilterChainManager == nil { + // Nothing to update, return early. + return + } + + l.mu.Lock() + l.switchModeLocked(connectivity.ServingModeServing, nil) + // "Updates to a Listener cause all older connections on that Listener to be + // gracefully shut down with a grace period of 10 minutes for long-lived + // RPC's, such that clients will reconnect and have the updated + // configuration apply." - A36 + connsToClose := l.conns + l.conns = make(map[*connWrapper]bool) + l.activeFilterChainManager = l.pendingFilterChainManager + l.pendingFilterChainManager = nil + l.instantiateFilterChainRoutingConfigurationsLocked() + l.mu.Unlock() + go func() { + for conn := range connsToClose { + conn.Drain() + } + }() + +} + +// handleRDSUpdate rebuilds any routing configuration server side for any filter +// chains that point to this RDS, and potentially makes pending lds +// configuration to swap to be active. +func (l *listenerWrapper) handleRDSUpdate(routeName string, rcu rdsWatcherUpdate) { + // Update any filter chains that point to this route configuration. + if l.activeFilterChainManager != nil { + for _, fc := range l.activeFilterChainManager.FilterChains() { + if fc.RouteConfigName != routeName { + continue + } + if rcu.err != nil && rcu.data == nil { // Either NACK before update, or resource not found triggers this conditional. + fc.UsableRouteConfiguration.Store(&xdsresource.UsableRouteConfiguration{ + Err: rcu.err, + }) + continue + } + fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*rcu.data)) + } + } + if l.rdsHandler.determineRouteConfigurationReady() { + l.maybeUpdateFilterChains() + } +} + +// instantiateFilterChainRoutingConfigurationsLocked instantiates all of the +// routing configuration for the newly active filter chains. For any inline +// route configurations, uses that, otherwise uses cached rdsHandler updates. +// Must be called within an xDS Client Callback. +func (l *listenerWrapper) instantiateFilterChainRoutingConfigurationsLocked() { + for _, fc := range l.activeFilterChainManager.FilterChains() { + if fc.InlineRouteConfig != nil { + fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*fc.InlineRouteConfig)) // Can't race with an RPC coming in but no harm making atomic. 
+ continue + } // Inline configuration constructed once here, will remain for lifetime of filter chain. + rcu := l.rdsHandler.updates[fc.RouteConfigName] + if rcu.err != nil && rcu.data == nil { + fc.UsableRouteConfiguration.Store(&xdsresource.UsableRouteConfiguration{Err: rcu.err}) + continue + } + fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*rcu.data)) // Can't race with an RPC coming in but no harm making atomic. + } +} + +// Accept blocks on an Accept() on the underlying listener, and wraps the +// returned net.connWrapper with the configured certificate providers. +func (l *listenerWrapper) Accept() (net.Conn, error) { + var retries int + for { + conn, err := l.Listener.Accept() + if err != nil { + // Temporary() method is implemented by certain error types returned + // from the net package, and it is useful for us to not shutdown the + // server in these conditions. The listen queue being full is one + // such case. + if ne, ok := err.(interface{ Temporary() bool }); !ok || !ne.Temporary() { + return nil, err + } + retries++ + timer := time.NewTimer(backoffFunc(retries)) + select { + case <-timer.C: + case <-l.closed.Done(): + timer.Stop() + // Continuing here will cause us to call Accept() again + // which will return a non-temporary error. + continue + } + continue + } + // Reset retries after a successful Accept(). + retries = 0 + + // Since the net.Conn represents an incoming connection, the source and + // destination address can be retrieved from the local address and + // remote address of the net.Conn respectively. + destAddr, ok1 := conn.LocalAddr().(*net.TCPAddr) + srcAddr, ok2 := conn.RemoteAddr().(*net.TCPAddr) + if !ok1 || !ok2 { + // If the incoming connection is not a TCP connection, which is + // really unexpected since we check whether the provided listener is + // a TCP listener in Serve(), we return an error which would cause + // us to stop serving. + return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr()) + } + + l.mu.Lock() + if l.mode == connectivity.ServingModeNotServing { + // Close connections as soon as we accept them when we are in + // "not-serving" mode. Since we accept a net.Listener from the user + // in Serve(), we cannot close the listener when we move to + // "not-serving". Closing the connection immediately upon accepting + // is one of the other ways to implement the "not-serving" mode as + // outlined in gRFC A36. + l.mu.Unlock() + conn.Close() + continue + } + + fc, err := l.activeFilterChainManager.Lookup(xdsresource.FilterChainLookupParams{ + IsUnspecifiedListener: l.isUnspecifiedAddr, + DestAddr: destAddr.IP, + SourceAddr: srcAddr.IP, + SourcePort: srcAddr.Port, + }) + if err != nil { + l.mu.Unlock() + // When a matching filter chain is not found, we close the + // connection right away, but do not return an error back to + // `grpc.Serve()` from where this Accept() was invoked. Returning an + // error to `grpc.Serve()` causes the server to shutdown. If we want + // to avoid the server from shutting down, we would need to return + // an error type which implements the `Temporary() bool` method, + // which is invoked by `grpc.Serve()` to see if the returned error + // represents a temporary condition. In the case of a temporary + // error, `grpc.Serve()` method sleeps for a small duration and + // therefore ends up blocking all connection attempts during that + // time frame, which is also not ideal for an error like this. 
+			l.logger.Warningf("Connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr(), conn.LocalAddr())
+			conn.Close()
+			continue
+		}
+		cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.UsableRouteConfiguration}
+		l.conns[cw] = true
+		l.mu.Unlock()
+		return cw, nil
+	}
+}
+
+func (l *listenerWrapper) removeConn(conn *connWrapper) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	delete(l.conns, conn)
+}
+
+// Close closes the underlying listener. It also cancels the xDS watch
+// registered in Serve() and closes any certificate provider instances created
+// based on security configuration received in the LDS response.
+func (l *listenerWrapper) Close() error {
+	l.closed.Fire()
+	l.Listener.Close()
+	if l.cancelWatch != nil {
+		l.cancelWatch()
+	}
+	l.rdsHandler.close()
+	return nil
+}
+
+// switchModeLocked switches the current mode of the listener wrapper. It also
+// gracefully closes any connections if the listener wrapper transitions into
+// not serving. If the serving mode has changed, it invokes the registered mode
+// change callback.
+func (l *listenerWrapper) switchModeLocked(newMode connectivity.ServingMode, err error) {
+	if l.mode == newMode && l.mode == connectivity.ServingModeServing {
+		// Redundant updates are suppressed only when we are SERVING and the new
+		// mode is also SERVING. In the other case (where we are NOT_SERVING and the
+		// new mode is also NOT_SERVING), the update is not suppressed because:
+		// 1. the error may have changed
+		// 2. it provides a timestamp of the last backoff attempt
+		return
+	}
+	l.mode = newMode
+	if l.mode == connectivity.ServingModeNotServing {
+		connsToClose := l.conns
+		l.conns = make(map[*connWrapper]bool)
+		go func() {
+			for conn := range connsToClose {
+				conn.Drain()
+			}
+		}()
+	}
+	// The XdsServer API will allow applications to register a "serving state"
+	// callback to be invoked when the server begins serving and when the
+	// server encounters errors that force it to be "not serving". If "not
+	// serving", the callback must be provided error information, for
+	// debugging use by developers - A36.
+	if l.modeCallback != nil {
+		l.modeCallback(l.Listener.Addr(), newMode, err)
+	}
+}
+
+func (l *listenerWrapper) onLDSResourceDoesNotExist(err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.switchModeLocked(connectivity.ServingModeNotServing, err)
+	l.activeFilterChainManager = nil
+	l.pendingFilterChainManager = nil
+	l.rdsHandler.updateRouteNamesToWatch(make(map[string]bool))
+}
+
+// ldsWatcher implements the xdsresource.ListenerWatcher interface and is
+// passed to the WatchListener API.
+type ldsWatcher struct { + parent *listenerWrapper + logger *internalgrpclog.PrefixLogger + name string +} + +func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q received update: %#v", lw.name, update.Resource) + } + lw.parent.handleLDSUpdate(update.Resource) +} + +func (lw *ldsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q reported error: %v", lw.name, err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. +} + +func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q reported resource-does-not-exist error", lw.name) + } + + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Listener not found in received response", lw.name) + lw.parent.onLDSResourceDoesNotExist(err) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go new file mode 100644 index 000000000..bcd3938e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -0,0 +1,191 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "sync" + + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// rdsHandler handles any RDS queries that need to be started for a given server +// side listeners Filter Chains (i.e. not inline). It persists rdsWatcher +// updates for later use and also determines whether all the rdsWatcher updates +// needed have been received or not. +type rdsHandler struct { + xdsC XDSClient + logger *igrpclog.PrefixLogger + + callback func(string, rdsWatcherUpdate) + + // updates is a map from routeName to rdsWatcher update, including + // RouteConfiguration resources and any errors received. If not written in + // this map, no RouteConfiguration or error for that route name yet. If + // update set in value, use that as valid route configuration, otherwise + // treat as an error case and fail at L7 level. 
+ updates map[string]rdsWatcherUpdate + + mu sync.Mutex + cancels map[string]func() +} + +// newRDSHandler creates a new rdsHandler to watch for RouteConfiguration +// resources. listenerWrapper updates the list of route names to watch by +// calling updateRouteNamesToWatch() upon receipt of new Listener configuration. +func newRDSHandler(cb func(string, rdsWatcherUpdate), xdsC XDSClient, logger *igrpclog.PrefixLogger) *rdsHandler { + return &rdsHandler{ + xdsC: xdsC, + logger: logger, + callback: cb, + updates: make(map[string]rdsWatcherUpdate), + cancels: make(map[string]func()), + } +} + +// updateRouteNamesToWatch handles a list of route names to watch for a given +// server side listener (if a filter chain specifies dynamic +// RouteConfiguration). This function handles all the logic with respect to any +// routes that may have been added or deleted as compared to what was previously +// present. Must be called within an xDS Client callback. +func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) { + rh.mu.Lock() + defer rh.mu.Unlock() + // Add and start watches for any new routes in routeNamesToWatch. + for routeName := range routeNamesToWatch { + if _, ok := rh.cancels[routeName]; !ok { + // The xDS client keeps a reference to the watcher until the cancel + // func is invoked. So, we don't need to keep a reference for fear + // of it being garbage collected. + w := &rdsWatcher{parent: rh, routeName: routeName} + cancel := xdsresource.WatchRouteConfig(rh.xdsC, routeName, w) + // Set bit on cancel function to eat any RouteConfiguration calls + // for this watcher after it has been canceled. + rh.cancels[routeName] = func() { + w.mu.Lock() + w.canceled = true + w.mu.Unlock() + cancel() + } + } + } + + // Delete and cancel watches for any routes from persisted routeNamesToWatch + // that are no longer present. + for routeName := range rh.cancels { + if _, ok := routeNamesToWatch[routeName]; !ok { + rh.cancels[routeName]() + delete(rh.cancels, routeName) + delete(rh.updates, routeName) + } + } +} + +// determines if all dynamic RouteConfiguration needed has received +// configuration or update. Must be called from an xDS Client Callback. +func (rh *rdsHandler) determineRouteConfigurationReady() bool { + // Safe to read cancels because only written to in other parts of xDS Client + // Callbacks, which are sync. + return len(rh.updates) == len(rh.cancels) +} + +// Must be called from an xDS Client Callback. +func (rh *rdsHandler) handleRouteUpdate(routeName string, update rdsWatcherUpdate) { + rwu := rh.updates[routeName] + + // Accept the new update if any of the following are true: + // 1. we had no valid update data. + // 2. the update is valid. + // 3. the update error is ResourceNotFound. + if rwu.data == nil || update.err == nil || xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { + rwu = update + } + rh.updates[routeName] = rwu + rh.callback(routeName, rwu) +} + +// close() is meant to be called by wrapped listener when the wrapped listener +// is closed, and it cleans up resources by canceling all the active RDS +// watches. +func (rh *rdsHandler) close() { + rh.mu.Lock() + defer rh.mu.Unlock() + for _, cancel := range rh.cancels { + cancel() + } +} + +type rdsWatcherUpdate struct { + data *xdsresource.RouteConfigUpdate + err error +} + +// rdsWatcher implements the xdsresource.RouteConfigWatcher interface and is +// passed to the WatchRouteConfig API. 
+type rdsWatcher struct {
+	parent    *rdsHandler
+	logger    *igrpclog.PrefixLogger
+	routeName string
+
+	mu       sync.Mutex
+	canceled bool // eats callbacks if true
+}
+
+func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q received update: %#v", rw.routeName, update.Resource)
+	}
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource})
+}
+
+func (rw *rdsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q reported error: %v", rw.routeName, err)
+	}
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err})
+}
+
+func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q reported resource-does-not-exist error", rw.routeName)
+	}
+	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type RouteConfiguration not found in received response", rw.routeName)
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err})
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
new file mode 100644
index 000000000..9076a76fd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import "google.golang.org/grpc/resolver"
+
+type clientKeyType string
+
+const clientKey = clientKeyType("grpc.xds.internal.client.Client")
+
+// FromResolverState returns the Client from state, or nil if not present.
+func FromResolverState(state resolver.State) XDSClient {
+	cs, _ := state.Attributes.Value(clientKey).(XDSClient)
+	return cs
+}
+
+// SetClient sets c in state and returns the new state.
+func SetClient(state resolver.State, c XDSClient) resolver.State {
+	state.Attributes = state.Attributes.WithValue(clientKey, c)
+	return state
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
new file mode 100644
index 000000000..3251737f1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
@@ -0,0 +1,703 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +type watchState int + +const ( + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. +) + +type resourceState struct { + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update + + // Common watch state for all watchers of this resource. + wTimer *time.Timer // Expiry timer + wState watchState // State of the watch +} + +// authority wraps all state associated with a single management server. It +// contains the transport used to communicate with the management server and a +// cache of resource state for resources requested from the management server. +// +// Bootstrap configuration could contain multiple entries in the authorities map +// that share the same server config (server address and credentials to use). We +// share the same authority instance amongst these entries, and the reference +// counting is taken care of by the `clientImpl` type. +type authority struct { + serverCfg *bootstrap.ServerConfig // Server config for this authority + bootstrapCfg *bootstrap.Config // Full bootstrap configuration + refCount int // Reference count of watches referring to this authority + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks + resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup + transport *transport.Transport // Underlying xDS transport to the management server + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *grpclog.PrefixLogger + + // A two level map containing the state of all the resources being watched. + // + // The first level map key is the ResourceType (Listener, Route etc). This + // allows us to have a single map for all resources instead of having per + // resource-type maps. + // + // The second level map key is the resource name, with the value being the + // actual state of the resource. 
+ resourcesMu sync.Mutex + resources map[xdsresource.Type]map[string]*resourceState + closed bool +} + +// authorityArgs is a convenience struct to wrap arguments required to create a +// new authority. All fields here correspond directly to appropriate fields +// stored in the authority struct. +type authorityArgs struct { + // The reason for passing server config and bootstrap config separately + // (although the former is part of the latter) is because authorities in the + // bootstrap config might contain an empty server config, and in this case, + // the top-level server config is to be used. + serverCfg *bootstrap.ServerConfig + bootstrapCfg *bootstrap.Config + serializer *grpcsync.CallbackSerializer + resourceTypeGetter func(string) xdsresource.Type + watchExpiryTimeout time.Duration + logger *grpclog.PrefixLogger +} + +func newAuthority(args authorityArgs) (*authority, error) { + ret := &authority{ + serverCfg: args.serverCfg, + bootstrapCfg: args.bootstrapCfg, + serializer: args.serializer, + resourceTypeGetter: args.resourceTypeGetter, + watchExpiryTimeout: args.watchExpiryTimeout, + logger: args.logger, + resources: make(map[xdsresource.Type]map[string]*resourceState), + } + + tr, err := transport.New(transport.Options{ + ServerCfg: args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Logger: args.logger, + NodeProto: args.bootstrapCfg.Node(), + }) + if err != nil { + return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) + } + ret.transport = tr + return ret, nil +} + +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. +func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. + if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) +} + +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { + rType := a.resourceTypeGetter(resourceUpdate.URL) + if rType == nil { + return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) + } + + opts := &xdsresource.DecodeOptions{ + BootstrapConfig: a.bootstrapCfg, + ServerConfig: a.serverCfg, + } + updates, md, err := decodeAllResources(opts, rType, resourceUpdate) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) + return err +} + +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We build a list of callback funcs to invoke, and invoke them at the end + // of this method instead of inline (when handling the update for a + // particular resource), because we want to make sure that all calls to + // increment watcherCnt happen before any callbacks are invoked. 
This will + // ensure that the onDone callback is never invoked before all watcher + // callbacks are invoked, and the watchers have processed the update. + watcherCnt := new(atomic.Int64) + done := func() { + watcherCnt.Add(-1) + if watcherCnt.Load() == 0 { + onDone() + } + } + funcsToSchedule := []func(context.Context){} + defer func() { + if len(funcsToSchedule) == 0 { + // When there are no watchers for the resources received as part of + // this update, invoke onDone explicitly to unblock the next read on + // the ADS stream. + onDone() + } + for _, f := range funcsToSchedule { + a.serializer.ScheduleOr(f, onDone) + } + }() + + resourceStates := a.resources[rType] + for name, uErr := range updates { + if state, ok := resourceStates[name]; ok { + // Cancel the expiry timer associated with the resource once a + // response is received, irrespective of whether the update is a + // good one or not. + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. + // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. And since the management + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } + state.wState = watchStateReceived + } + + if uErr.err != nil { + // On error, keep previous version of the resource. But update + // status and error. + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.err + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) + } + continue + } + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers only if this is a first time update or it is different + // from the one currently cached. + if state.cache == nil || !state.cache.Equal(uErr.resource) { + for watcher := range state.watchers { + watcher := watcher + resource := uErr.resource + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) + } + } + // Sync cache. + if a.logger.V(2) { + a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) + } + state.cache = uErr.resource + // Set status to ACK, and clear error state. 
The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } + } + } + + // If this resource type requires that all resources be present in every + // SotW response from the server, a response that does not include a + // previously seen resource will be interpreted as a deletion of that + // resource unless ignore_resource_deletion option was set in the server + // config. + if !rType.AllResourcesRequiredInSotW() { + return + } + for name, state := range resourceStates { + if state.cache == nil { + // If the resource state does not contain a cached update, which can + // happen when: + // - resource was newly requested but has not yet been received, or, + // - resource was removed as part of a previous update, + // we don't want to generate an error for the watchers. + // + // For the first of the above two conditions, this ADS response may + // be in reaction to an earlier request that did not yet request the + // new resource, so its absence from the response does not + // necessarily indicate that the resource does not exist. For that + // case, we rely on the request timeout instead. + // + // For the second of the above two conditions, we already generated + // an error when we received the first response which removed this + // resource. So, there is no need to generate another one. + continue + } + if _, ok := updates[name]; !ok { + // The metadata status is set to "ServiceStatusNotExist" if a + // previous update deleted this resource, in which case we do not + // want to repeatedly call the watch callbacks with a + // "resource-not-found" error. + if state.md.Status == xdsresource.ServiceStatusNotExist { + continue + } + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } + continue + } + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send a resource not found error + // to indicate resource removed. Metadata for the resource is still + // maintained, as this is required by CSDS. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) + } + } + } +} + +type resourceDataErrTuple struct { + resource xdsresource.ResourceData + err error +} + +func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: update.Version, + Timestamp: timestamp, + } + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. 
+ perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. + for _, r := range update.Resources { + result, err := rType.Decode(opts, r) + + // Name field of the result is left unpopulated only when resource + // deserialization fails. + name := "" + if result != nil { + name = xdsresource.ParseName(result.Name).String() + } + if err == nil { + ret[name] = resourceDataErrTuple{resource: result.Resource} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret[name] = resourceDataErrTuple{err: err} + } + + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = xdsresource.ServiceStatusACKed + return ret, md, nil + } + + md.Status = xdsresource.ServiceStatusNACKed + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) + md.ErrState = &xdsresource.UpdateErrorMetadata{ + Version: update.Version, + Err: errRet, + Timestamp: timestamp, + } + return ret, md, errRet +} + +// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources +// requested on the underlying ADS stream. This satisfies the conditions to start +// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] +// +// Caller must hold a.resourcesMu. +func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { + resourceStates := a.resources[rType] + for _, resourceName := range resourceNames { + if state, ok := resourceStates[resourceName]; ok { + if state.wState != watchStateStarted { + continue + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.resourcesMu.Lock() + a.handleWatchTimerExpiryLocked(rType, resourceName, state) + a.resourcesMu.Unlock() + }) + state.wState = watchStateRequested + } + } +} + +// stopWatchTimersLocked is invoked upon connection errors to stops watch timers +// for resources that have been requested, but not yet responded to by the management +// server. +// +// Caller must hold a.resourcesMu. +func (a *authority) stopWatchTimersLocked() { + for _, rType := range a.resources { + for resourceName, state := range rType { + if state.wState != watchStateRequested { + continue + } + if !state.wTimer.Stop() { + // If the timer has already fired, it means that the timer watch expiry + // callback is blocked on the same lock that we currently hold. Don't change + // the watch state and instead let the watch expiry callback handle it. + a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) + continue + } + state.wTimer = nil + state.wState = watchStateStarted + } + } +} + +// newConnectionError is called by the underlying transport when it receives a +// connection error. The error will be forwarded to all the resource watchers. +func (a *authority) newConnectionError(err error) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + a.stopWatchTimersLocked() + + // We do not consider it an error if the ADS stream was closed after having received + // a response on the stream. 
This is because there are legitimate reasons why the server + // may need to close the stream during normal operations, such as needing to rebalance + // load or the underlying connection hitting its max connection age limit. + // See gRFC A57 for more details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + + for _, rType := range a.resources { + for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) + }) + } + } + } +} + +// Increments the reference count. Caller must hold parent's authorityMu. +func (a *authority) refLocked() { + a.refCount++ +} + +// Decrements the reference count. Caller must hold parent's authorityMu. +func (a *authority) unrefLocked() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + a.transport.Close() + + a.resourcesMu.Lock() + a.closed = true + a.resourcesMu.Unlock() +} + +func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { + if a.logger.V(2) { + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // Lookup the ResourceType specific resources from the top-level map. If + // there is no entry for this ResourceType, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources + } + + // Lookup the resourceState for the particular resource that the watch is + // being registered for. If this is the first watch for this resource, + // instruct the transport layer to send a DiscoveryRequest for the same. + state := resources[resourceName] + if state == nil { + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + wState: watchStateStarted, + } + resources[resourceName] = state + a.sendDiscoveryRequestLocked(rType, resources) + } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true + + // If we have a cached copy of the resource, notify the new watcher. + if state.cache != nil { + if a.logger.V(2) { + a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + } + resource := state.cache + a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) + } + + return func() { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We already have a reference to the resourceState for this particular + // resource. Avoid indexing into the two-level map to figure this out. + + // Delete this particular watcher from the list of watchers, so that its + // callback will not be invoked in the future. 
+ state.wState = watchStateCanceled + delete(state.watchers, watcher) + if len(state.watchers) > 0 { + return + } + + // There are no more watchers for this resource, delete the state + // associated with it, and instruct the transport to send a request + // which does not include this resource name. + if a.logger.V(2) { + a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + delete(resources, resourceName) + a.sendDiscoveryRequestLocked(rType, resources) + } +} + +func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) { + if a.closed { + return + } + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) + + switch state.wState { + case watchStateRequested: + // This is the only state where we need to handle the timer expiry by + // invoking appropriate watch callbacks. This is handled outside the switch. + case watchStateCanceled: + return + default: + a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) + return + } + + state.wState = watchStateTimeout + // With the watch timer firing, it is safe to assume that the resource does + // not exist on the management server. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) + } +} + +func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + if a.closed { + return + } + resourceStates := a.resources[rType] + state, ok := resourceStates[resourceName] + if !ok { + return + } + // if watchStateTimeout already triggered resource not found above from + // normal watch expiry. + if state.wState == watchStateCanceled || state.wState == watchStateTimeout { + return + } + state.wState = watchStateTimeout + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) + } +} + +// sendDiscoveryRequestLocked sends a discovery request for the specified +// resource type and resource names. Even though this method does not directly +// access the resource cache, it is important that `resourcesMu` be held when +// calling this method to ensure that a consistent snapshot of resource names is +// being requested. 
+func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { + resourcesToRequest := make([]string, len(resources)) + i := 0 + for name := range resources { + resourcesToRequest[i] = name + i++ + } + a.transport.SendRequest(rType.TypeURL(), resourcesToRequest) +} + +func (a *authority) reportLoad() (*load.Store, func()) { + return a.transport.ReportLoad() +} + +func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + var ret []*v3statuspb.ClientConfig_GenericXdsConfig + for rType, resourceStates := range a.resources { + typeURL := rType.TypeURL() + for name, state := range resourceStates { + var raw *anypb.Any + if state.cache != nil { + raw = state.cache.Raw() + } + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: state.md.Version, + XdsConfig: raw, + LastUpdated: timestamppb.New(state.md.Timestamp), + ClientStatus: serviceStatusToProto(state.md.Status), + } + if errState := state.md.ErrState; errState != nil { + config.ErrorState = &v3adminpb.UpdateFailureState{ + LastUpdateAttempt: timestamppb.New(errState.Timestamp), + Details: errState.Err.Error(), + VersionInfo: errState.Version, + } + } + ret = append(ret, config) + } + } + return ret +} + +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { + switch serviceStatus { + case xdsresource.ServiceStatusUnknown: + return v3adminpb.ClientResourceStatus_UNKNOWN + case xdsresource.ServiceStatusRequested: + return v3adminpb.ClientResourceStatus_REQUESTED + case xdsresource.ServiceStatusNotExist: + return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST + case xdsresource.ServiceStatusACKed: + return v3adminpb.ClientResourceStatus_ACKED + case xdsresource.ServiceStatusNACKed: + return v3adminpb.ClientResourceStatus_NACKED + default: + return v3adminpb.ClientResourceStatus_UNKNOWN + } +} + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go new file mode 100644 index 000000000..144cb5bd7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package xdsclient implements a full-fledged gRPC client for the xDS API used
+// by the xds resolver and balancer implementations.
+package xdsclient
+
+import (
+	"google.golang.org/grpc/internal/xds/bootstrap"
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
+)
+
+// XDSClient is a full-fledged gRPC client which queries a set of discovery APIs
+// (collectively termed xDS) on a remote management server to discover
+// various dynamic resources.
+type XDSClient interface {
+	// WatchResource uses xDS to discover the resource associated with the
+	// provided resource name. The resource type implementation determines how
+	// xDS requests are sent out and how responses are deserialized and
+	// validated. Upon receipt of a response from the management server, an
+	// appropriate callback on the watcher is invoked.
+	//
+	// Most callers will not need to use this API directly. They will
+	// instead use a resource-type-specific wrapper API provided by the relevant
+	// resource type implementation.
+	//
+	//
+	// During a race (e.g. an xDS response is received while the user is calling
+	// cancel()), there's a small window where the callback can be called after
+	// the watcher is canceled. Callers need to handle this case.
+	WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func())
+
+	ReportLoad(*bootstrap.ServerConfig) (*load.Store, func())
+
+	BootstrapConfig() *bootstrap.Config
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
new file mode 100644
index 000000000..6097e8692
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
@@ -0,0 +1,158 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/cache"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/xds/bootstrap"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
+)
+
+// NameForServer represents the value to be passed as name when creating an xDS
+// client from xDS-enabled gRPC servers. This is a well-known dedicated key
+// value, and is defined in gRFC A71.
+const NameForServer = "#server" + +// New returns an xDS client configured with bootstrap configuration specified +// by the ordered list: +// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP +// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG +// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig +// +// gRPC client implementations are expected to pass the channel's target URI for +// the name field, while server implementations are expected to pass a dedicated +// well-known value "#server", as specified in gRFC A71. The returned client is +// a reference counted implementation shared among callers using the same name. +// +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +func New(name string) (XDSClient, func(), error) { + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) +} + +// newClientImpl returns a new xdsClient with the given config. +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { + ctx, cancel := context.WithCancel(context.Background()) + c := &clientImpl{ + done: grpcsync.NewEvent(), + config: config, + watchExpiryTimeout: watchExpiryTimeout, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerClose: cancel, + resourceTypes: newResourceTypeRegistry(), + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + } + + c.logger = prefixLogger(c) + return c, nil +} + +// OptionsForTesting contains options to configure xDS client creation for +// testing purposes only. +type OptionsForTesting struct { + // Name is a unique name for this xDS client. + Name string + // Contents contain a JSON representation of the bootstrap configuration to + // be used when creating the xDS client. + Contents []byte + + // WatchExpiryTimeout is the timeout for xDS resource watch expiry. If + // unspecified, uses the default value used in non-test code. + WatchExpiryTimeout time.Duration + + // AuthorityIdleTimeout is the timeout before idle authorities are deleted. + // If unspecified, uses the default value used in non-test code. + AuthorityIdleTimeout time.Duration +} + +// NewForTesting returns an xDS client configured with the provided options. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. 
+func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { + if opts.Name == "" { + return nil, nil, fmt.Errorf("opts.Name field must be non-empty") + } + if opts.WatchExpiryTimeout == 0 { + opts.WatchExpiryTimeout = defaultWatchExpiryTimeout + } + if opts.AuthorityIdleTimeout == 0 { + opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout + } + + if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { + return nil, nil, err + } + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) + return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err +} + +// GetForTesting returns an xDS client created earlier using the given name. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. +func GetForTesting(name string) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + c, ok := clients[name] + if !ok { + return nil, nil, fmt.Errorf("xDS client with name %q not found", name) + } + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +func init() { + internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting +} + +func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { + crc, ok := client.(*clientRefCounted) + if !ok { + return fmt.Errorf("xDS client is of type %T, want %T", client, &clientRefCounted{}) + } + return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go new file mode 100644 index 000000000..1efb4de42 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. 
+ xdsClientImplCreateHook = func(string) {} + xdsClientImplCloseHook = func(string) {} +) + +func clientRefCountedClose(name string) { + clientsMu.Lock() + defer clientsMu.Unlock() + + client, ok := clients[name] + if !ok { + logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + return + } + if client.decrRef() != 0 { + return + } + client.clientImpl.close() + xdsClientImplCloseHook(name) + delete(clients, name) + +} + +// newRefCounted creates a new reference counted xDS client implementation for +// name, if one does not exist already. If an xDS client for the given name +// exists, it gets a reference to it and returns it. +func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + if c := clients[name]; c != nil { + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil + } + + // Create the new client implementation. + config, err := bootstrap.GetConfiguration() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) + } + c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout) + if err != nil { + return nil, nil, err + } + c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config) + client := &clientRefCounted{clientImpl: c, refCount: 1} + clients[name] = client + xdsClientImplCreateHook(name) + + logger.Infof("xDS node ID: %s", config.Node().GetId()) + return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go new file mode 100644 index 000000000..9f619016a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +var _ XDSClient = &clientImpl{} + +// clientImpl is the real implementation of the xds client. The exported Client +// is a wrapper of this struct with a ref count. 
+type clientImpl struct { + done *grpcsync.Event + config *bootstrap.Config + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration + serializer *grpcsync.CallbackSerializer + serializerClose func() + resourceTypes *resourceTypeRegistry + + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). + // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache +} + +// BootstrapConfig returns the configuration read from the bootstrap file. +// Callers must treat the return value as read-only. +func (c *clientImpl) BootstrapConfig() *bootstrap.Config { + return c.config +} + +// close closes the gRPC connection to the management server. +func (c *clientImpl) close() { + if c.done.HasFired() { + return + } + c.done.Fire() + // TODO: Should we invoke the registered callbacks here with an error that + // the client is closed? + + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() + } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + c.serializerClose() + + for _, s := range c.config.XDSServers() { + for _, f := range s.Cleanups() { + f() + } + } + for _, a := range c.config.Authorities() { + for _, s := range a.XDSServers { + for _, f := range s.Cleanups() { + f() + } + } + } + c.logger.Infof("Shutdown") +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go new file mode 100644 index 000000000..1ce20fabd --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), error) { + scheme, authority := n.Scheme, n.Authority + + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if c.done.HasFired() { + return nil, nil, errors.New("the xds-client is closed") + } + + config := c.config.XDSServers()[0] + if scheme == xdsresource.FederationScheme { + authorities := c.config.Authorities() + if authorities == nil { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + cfg, ok := authorities[authority] + if !ok { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + if len(cfg.XDSServers) >= 1 { + config = cfg.XDSServers[0] + } + } + + a, err := c.newAuthorityLocked(config) + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) + } + // All returned authority from this function will be used by a watch, + // holding the ref here. + // + // Note that this must be done while c.authorityMu is held, to avoid the + // race that an authority is returned, but before the watch starts, the + // old last watch is canceled (in another goroutine), causing this + // authority to be removed, and then a watch will start on a removed + // authority. + // + // unref() will be done when the watch is canceled. + a.refLocked() + return a, func() { c.unrefAuthority(a) }, nil +} + +// newAuthorityLocked creates a new authority for the given config. If an +// authority for the given config exists in the cache, it is returned instead of +// creating a new one. +// +// The caller must take a reference of the returned authority before using, and +// unref afterwards. +// +// caller must hold c.authorityMu +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { + // First check if there's already an authority for this config. If found, it + // means this authority is used by other watches (could be the same + // authority name, or a different authority name but the same server + // config). Return it. + configStr := config.String() + if a, ok := c.authorities[configStr]; ok { + return a, nil + } + // Second check if there's an authority in the idle cache. If found, it + // means this authority was created, but moved to the idle cache because the + // watch was canceled. Move it from idle cache to the authority cache, and + // return. + if old, ok := c.idleAuthorities.Remove(configStr); ok { + oldA, _ := old.(*authority) + if oldA != nil { + c.authorities[configStr] = oldA + return oldA, nil + } + } + + // Make a new authority since there's no existing authority for this config. + ret, err := newAuthority(authorityArgs{ + serverCfg: config, + bootstrapCfg: c.config, + serializer: c.serializer, + resourceTypeGetter: c.resourceTypes.get, + watchExpiryTimeout: c.watchExpiryTimeout, + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), + }) + if err != nil { + return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) + } + // Add it to the cache, so it will be reused. + c.authorities[configStr] = ret + return ret, nil +} + +// unrefAuthority unrefs the authority. It also moves the authority to idle +// cache if it's ref count is 0. +// +// This function doesn't need to called explicitly. It's called by the returned +// unref from findAuthority(). +// +// Caller must not hold c.authorityMu. 
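newAuthorityLocked and the unref path described above keep each authority in exactly one of two maps: authorities while it has watchers, and idleAuthorities (a timeout cache) for a grace period after the last unref so a quick re-watch can reuse it. Below is a rough sketch of that active/idle pattern using only a plain map and time.AfterFunc; the type and method names are hypothetical, and the real code relies on internal/cache.TimeoutCache instead:

package sketch

import (
	"sync"
	"time"
)

// entry is a hypothetical stand-in for an authority: ref-counted while in
// use, parked with an eviction timer once the last reference goes away.
type entry struct {
	refs    int
	timer   *time.Timer
	closeFn func()
}

// idleCache keeps every entry in exactly one of the two maps, mirroring the
// authorities / idleAuthorities split above.
type idleCache struct {
	mu      sync.Mutex
	active  map[string]*entry
	idle    map[string]*entry
	timeout time.Duration
}

func newIdleCache(timeout time.Duration) *idleCache {
	return &idleCache{
		active:  map[string]*entry{},
		idle:    map[string]*entry{},
		timeout: timeout,
	}
}

// get returns the entry for key, reviving it from the idle map if a pending
// eviction has not fired yet; newEntry is only called on a true miss.
func (c *idleCache) get(key string, newEntry func() *entry) *entry {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.active[key]; ok {
		e.refs++
		return e
	}
	if e, ok := c.idle[key]; ok {
		e.timer.Stop() // cancel the pending eviction
		delete(c.idle, key)
		e.refs++
		c.active[key] = e
		return e
	}
	e := newEntry()
	e.refs = 1
	c.active[key] = e
	return e
}

// put releases one reference; the last release parks the entry in the idle
// map and arms a timer that closes it unless get revives it first.
func (c *idleCache) put(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.active[key]
	if !ok {
		return
	}
	if e.refs--; e.refs > 0 {
		return
	}
	delete(c.active, key)
	c.idle[key] = e
	e.timer = time.AfterFunc(c.timeout, func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		if c.idle[key] != e {
			return // revived or replaced in the meantime
		}
		delete(c.idle, key)
		if e.closeFn != nil {
			e.closeFn()
		}
	})
}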
+func (c *clientImpl) unrefAuthority(a *authority) { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if a.unrefLocked() > 0 { + return + } + configStr := a.serverCfg.String() + delete(c.authorities, configStr) + c.idleAuthorities.Add(configStr, a, func() { + a.close() + }) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go new file mode 100644 index 000000000..f4d7b0a01 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +// dumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + + var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig + for _, a := range c.authorities { + retCfg = append(retCfg, a.dumpResources()...) + } + + return &v3statuspb.ClientConfig{ + Node: c.config.Node(), + GenericXdsConfigs: retCfg, + } +} + +// DumpResources returns the status and contents of all xDS resources. +func DumpResources() *v3statuspb.ClientStatusResponse { + clientsMu.Lock() + defer clientsMu.Unlock() + + resp := &v3statuspb.ClientStatusResponse{} + for key, client := range clients { + cfg := client.dumpResources() + cfg.ClientScope = key + resp.Config = append(resp.Config, cfg) + } + return resp +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go new file mode 100644 index 000000000..b42e43a56 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +// ReportLoad starts a load reporting stream to the given server. All load +// reports to the same server share the LRS stream. +// +// It returns a Store for the user to report loads, a function to cancel the +// load reporting stream. 
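DumpResources above stitches the per-client dumps into a single ClientStatusResponse, tagging each entry with its registry key via ClientScope. A hypothetical consumer of that dump might look like the sketch below; note that xdsclient is an internal package, so a snippet like this only builds from inside the grpc module:

package sketch

import (
	"fmt"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

// printXDSConfigs is a hypothetical consumer of the aggregate dump. Each
// ClientConfig in the response carries the client's registry key in
// ClientScope, alongside the generic xDS resource entries.
func printXDSConfigs() {
	resp := xdsclient.DumpResources()
	for _, cfg := range resp.GetConfig() {
		fmt.Printf("xDS client %q: %d resources\n", cfg.GetClientScope(), len(cfg.GetGenericXdsConfigs()))
	}
}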
+func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { + c.authorityMu.Lock() + a, err := c.newAuthorityLocked(server) + if err != nil { + c.authorityMu.Unlock() + c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err) + return nil, func() {} + } + // Hold the ref before starting load reporting. + a.refLocked() + c.authorityMu.Unlock() + + store, cancelF := a.reportLoad() + return store, func() { + cancelF() + c.unrefAuthority(a) + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go new file mode 100644 index 000000000..b9af85db6 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// WatchResource uses xDS to discover the resource associated with the provided +// resource name. The resource type implementation determines how xDS requests +// are sent out and how responses are deserialized and validated. Upon receipt +// of a response from the management server, an appropriate callback on the +// watcher is invoked. +func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) { + // Return early if the client is already closed. + // + // The client returned from the top-level API is a ref-counted client which + // contains a pointer to `clientImpl`. When all references are released, the + // ref-counted client sets its pointer to `nil`. And if any watch APIs are + // made on such a closed client, we will get here with a `nil` receiver. + if c == nil || c.done.HasFired() { + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeName(), resourceName) + return func() {} + } + + if err := c.resourceTypes.maybeRegister(rType); err != nil { + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) + return func() {} + } + + // TODO: Make ParseName return an error if parsing fails, and + // schedule the OnError callback in that case. 
+ n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) + return func() {} + } + cancelF := a.watchResource(rType, n.String(), watcher) + return func() { + cancelF() + unref() + } +} + +// A registry of xdsresource.Type implementations indexed by their corresponding +// type URLs. Registration of an xdsresource.Type happens the first time a watch +// for a resource of that type is invoked. +type resourceTypeRegistry struct { + mu sync.Mutex + types map[string]xdsresource.Type +} + +func newResourceTypeRegistry() *resourceTypeRegistry { + return &resourceTypeRegistry{types: make(map[string]xdsresource.Type)} +} + +func (r *resourceTypeRegistry) get(url string) xdsresource.Type { + r.mu.Lock() + defer r.mu.Unlock() + return r.types[url] +} + +func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { + r.mu.Lock() + defer r.mu.Unlock() + + url := rType.TypeURL() + typ, ok := r.types[url] + if ok && typ != rType { + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeName()) + } + r.types[url] = rType + return nil +} + +func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { + if c == nil || c.done.HasFired() { + return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", rType.TypeName(), resourceName) + } + + n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but authority %q is not found", rType.TypeName(), resourceName, n.Authority) + } + defer unref() + a.triggerResourceNotFoundForTesting(rType, n.String()) + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go new file mode 100644 index 000000000..e12610744 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the xdsclient package. +package internal + +// The following vars can be overridden by tests. +var ( + // NewADSStream is a function that returns a new ADS stream. 
+ NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go new file mode 100644 index 000000000..67e29e5ba --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package load + +// PerClusterReporter wraps the methods from the loadStore that are used here. +type PerClusterReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) + CallDropped(category string) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go new file mode 100644 index 000000000..f1e265ee7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go @@ -0,0 +1,441 @@ +/* + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package load provides functionality to record and maintain load data. +package load + +import ( + "sync" + "sync/atomic" + "time" +) + +const negativeOneUInt64 = ^uint64(0) + +// Store keeps the loads for multiple clusters and services to be reported via +// LRS. It contains loads to reported to one LRS server. Create multiple stores +// for multiple servers. +// +// It is safe for concurrent use. +type Store struct { + // mu only protects the map (2 layers). The read/write to *perClusterStore + // doesn't need to hold the mu. + mu sync.Mutex + // clusters is a map with cluster name as the key. The second layer is a map + // with service name as the key. Each value (perClusterStore) contains data + // for a (cluster, service) pair. + // + // Note that new entries are added to this map, but never removed. This is + // potentially a memory leak. But the memory is allocated for each new + // (cluster,service) pair, and the memory allocated is just pointers and + // maps. So this shouldn't get too bad. + clusters map[string]map[string]*perClusterStore +} + +// NewStore creates a Store. +func NewStore() *Store { + return &Store{ + clusters: make(map[string]map[string]*perClusterStore), + } +} + +// Stats returns the load data for the given cluster names. 
Data is returned in +// a slice with no specific order. +// +// If no clusterName is given (an empty slice), all data for all known clusters +// is returned. +// +// If a cluster's Data is empty (no load to report), it's not appended to the +// returned slice. +func (s *Store) Stats(clusterNames []string) []*Data { + var ret []*Data + s.mu.Lock() + defer s.mu.Unlock() + + if len(clusterNames) == 0 { + for _, c := range s.clusters { + ret = appendClusterStats(ret, c) + } + return ret + } + + for _, n := range clusterNames { + if c, ok := s.clusters[n]; ok { + ret = appendClusterStats(ret, c) + } + } + return ret +} + +// appendClusterStats gets Data for the given cluster, append to ret, and return +// the new slice. +// +// Data is only appended to ret if it's not empty. +func appendClusterStats(ret []*Data, cluster map[string]*perClusterStore) []*Data { + for _, d := range cluster { + data := d.stats() + if data == nil { + // Skip this data if it doesn't contain any information. + continue + } + ret = append(ret, data) + } + return ret +} + +// PerCluster returns the perClusterStore for the given clusterName + +// serviceName. +func (s *Store) PerCluster(clusterName, serviceName string) PerClusterReporter { + if s == nil { + return nil + } + + s.mu.Lock() + defer s.mu.Unlock() + c, ok := s.clusters[clusterName] + if !ok { + c = make(map[string]*perClusterStore) + s.clusters[clusterName] = c + } + + if p, ok := c[serviceName]; ok { + return p + } + p := &perClusterStore{ + cluster: clusterName, + service: serviceName, + } + c[serviceName] = p + return p +} + +// perClusterStore is a repository for LB policy implementations to report store +// load data. It contains load for a (cluster, edsService) pair. +// +// It is safe for concurrent use. +// +// TODO(easwars): Use regular maps with mutexes instead of sync.Map here. The +// latter is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only +// grow, or (2) when multiple goroutines read, write, and overwrite entries for +// disjoint sets of keys. In these two cases, use of a Map may significantly +// reduce lock contention compared to a Go map paired with a separate Mutex or +// RWMutex. +// Neither of these conditions are met here, and we should transition to a +// regular map with a mutex for better type safety. +type perClusterStore struct { + cluster, service string + drops sync.Map // map[string]*uint64 + localityRPCCount sync.Map // map[string]*rpcCountData + + mu sync.Mutex + lastLoadReportAt time.Time +} + +// Update functions are called by picker for each RPC. To avoid contention, all +// updates are done atomically. + +// CallDropped adds one drop record with the given category to store. +func (ls *perClusterStore) CallDropped(category string) { + if ls == nil { + return + } + + p, ok := ls.drops.Load(category) + if !ok { + tp := new(uint64) + p, _ = ls.drops.LoadOrStore(category, tp) + } + atomic.AddUint64(p.(*uint64), 1) +} + +// CallStarted adds one call started record for the given locality. +func (ls *perClusterStore) CallStarted(locality string) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + tp := newRPCCountData() + p, _ = ls.localityRPCCount.LoadOrStore(locality, tp) + } + p.(*rpcCountData).incrInProgress() + p.(*rpcCountData).incrIssued() +} + +// CallFinished adds one call finished record for the given locality. +// For successful calls, err needs to be nil. 
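The CallStarted/CallFinished/CallServerLoad/CallDropped methods are the whole surface an LB picker needs. A hedged sketch of how a caller might drive them for a single RPC follows; the helper names are hypothetical, and the cluster, service, locality, and metric strings are placeholders:

package sketch

import (
	"google.golang.org/grpc/xds/internal/xdsclient/load"
)

// recordCall is a hypothetical helper showing how a picker-side component
// might drive the PerClusterReporter methods for one RPC: one CallStarted,
// one CallFinished, plus an optional backend-reported load on success.
func recordCall(store *load.Store, cluster, edsService, locality string, run func() error) error {
	r := store.PerCluster(cluster, edsService)
	r.CallStarted(locality)
	err := run()
	r.CallFinished(locality, err) // a nil err counts as a success
	if err == nil {
		r.CallServerLoad(locality, "cpu_utilization", 0.42) // metric name is arbitrary
	}
	return err
}

// recordDrop is the corresponding helper for requests dropped before they are
// ever started, e.g. by circuit breaking.
func recordDrop(store *load.Store, cluster, edsService, category string) {
	store.PerCluster(cluster, edsService).CallDropped(category)
}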
+func (ls *perClusterStore) CallFinished(locality string, err error) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + // The map is never cleared, only values in the map are reset. So the + // case where entry for call-finish is not found should never happen. + return + } + p.(*rpcCountData).decrInProgress() + if err == nil { + p.(*rpcCountData).incrSucceeded() + } else { + p.(*rpcCountData).incrErrored() + } +} + +// CallServerLoad adds one server load record for the given locality. The +// load type is specified by desc, and its value by val. +func (ls *perClusterStore) CallServerLoad(locality, name string, d float64) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + // The map is never cleared, only values in the map are reset. So the + // case where entry for callServerLoad is not found should never happen. + return + } + p.(*rpcCountData).addServerLoad(name, d) +} + +// Data contains all load data reported to the Store since the most recent call +// to stats(). +type Data struct { + // Cluster is the name of the cluster this data is for. + Cluster string + // Service is the name of the EDS service this data is for. + Service string + // TotalDrops is the total number of dropped requests. + TotalDrops uint64 + // Drops is the number of dropped requests per category. + Drops map[string]uint64 + // LocalityStats contains load reports per locality. + LocalityStats map[string]LocalityData + // ReportInternal is the duration since last time load was reported (stats() + // was called). + ReportInterval time.Duration +} + +// LocalityData contains load data for a single locality. +type LocalityData struct { + // RequestStats contains counts of requests made to the locality. + RequestStats RequestData + // LoadStats contains server load data for requests made to the locality, + // indexed by the load type. + LoadStats map[string]ServerLoadData +} + +// RequestData contains request counts. +type RequestData struct { + // Succeeded is the number of succeeded requests. + Succeeded uint64 + // Errored is the number of requests which ran into errors. + Errored uint64 + // InProgress is the number of requests in flight. + InProgress uint64 + // Issued is the total number requests that were sent. + Issued uint64 +} + +// ServerLoadData contains server load data. +type ServerLoadData struct { + // Count is the number of load reports. + Count uint64 + // Sum is the total value of all load reports. + Sum float64 +} + +func newData(cluster, service string) *Data { + return &Data{ + Cluster: cluster, + Service: service, + Drops: make(map[string]uint64), + LocalityStats: make(map[string]LocalityData), + } +} + +// stats returns and resets all loads reported to the store, except inProgress +// rpc counts. +// +// It returns nil if the store doesn't contain any (new) data. +func (ls *perClusterStore) stats() *Data { + if ls == nil { + return nil + } + + sd := newData(ls.cluster, ls.service) + ls.drops.Range(func(key, val any) bool { + d := atomic.SwapUint64(val.(*uint64), 0) + if d == 0 { + return true + } + sd.TotalDrops += d + keyStr := key.(string) + if keyStr != "" { + // Skip drops without category. They are counted in total_drops, but + // not in per category. One example is drops by circuit breaking. 
+ sd.Drops[keyStr] = d + } + return true + }) + ls.localityRPCCount.Range(func(key, val any) bool { + countData := val.(*rpcCountData) + succeeded := countData.loadAndClearSucceeded() + inProgress := countData.loadInProgress() + errored := countData.loadAndClearErrored() + issued := countData.loadAndClearIssued() + if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 { + return true + } + + ld := LocalityData{ + RequestStats: RequestData{ + Succeeded: succeeded, + Errored: errored, + InProgress: inProgress, + Issued: issued, + }, + LoadStats: make(map[string]ServerLoadData), + } + countData.serverLoads.Range(func(key, val any) bool { + sum, count := val.(*rpcLoadData).loadAndClear() + if count == 0 { + return true + } + ld.LoadStats[key.(string)] = ServerLoadData{ + Count: count, + Sum: sum, + } + return true + }) + sd.LocalityStats[key.(string)] = ld + return true + }) + + ls.mu.Lock() + sd.ReportInterval = time.Since(ls.lastLoadReportAt) + ls.lastLoadReportAt = time.Now() + ls.mu.Unlock() + + if sd.TotalDrops == 0 && len(sd.Drops) == 0 && len(sd.LocalityStats) == 0 { + return nil + } + return sd +} + +type rpcCountData struct { + // Only atomic accesses are allowed for the fields. + succeeded *uint64 + errored *uint64 + inProgress *uint64 + issued *uint64 + + // Map from load desc to load data (sum+count). Loading data from map is + // atomic, but updating data takes a lock, which could cause contention when + // multiple RPCs try to report loads for the same desc. + // + // To fix the contention, shard this map. + serverLoads sync.Map // map[string]*rpcLoadData +} + +func newRPCCountData() *rpcCountData { + return &rpcCountData{ + succeeded: new(uint64), + errored: new(uint64), + inProgress: new(uint64), + issued: new(uint64), + } +} + +func (rcd *rpcCountData) incrSucceeded() { + atomic.AddUint64(rcd.succeeded, 1) +} + +func (rcd *rpcCountData) loadAndClearSucceeded() uint64 { + return atomic.SwapUint64(rcd.succeeded, 0) +} + +func (rcd *rpcCountData) incrErrored() { + atomic.AddUint64(rcd.errored, 1) +} + +func (rcd *rpcCountData) loadAndClearErrored() uint64 { + return atomic.SwapUint64(rcd.errored, 0) +} + +func (rcd *rpcCountData) incrInProgress() { + atomic.AddUint64(rcd.inProgress, 1) +} + +func (rcd *rpcCountData) decrInProgress() { + atomic.AddUint64(rcd.inProgress, negativeOneUInt64) // atomic.Add(x, -1) +} + +func (rcd *rpcCountData) loadInProgress() uint64 { + return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. +} + +func (rcd *rpcCountData) incrIssued() { + atomic.AddUint64(rcd.issued, 1) +} + +func (rcd *rpcCountData) loadAndClearIssued() uint64 { + return atomic.SwapUint64(rcd.issued, 0) +} + +func (rcd *rpcCountData) addServerLoad(name string, d float64) { + loads, ok := rcd.serverLoads.Load(name) + if !ok { + tl := newRPCLoadData() + loads, _ = rcd.serverLoads.LoadOrStore(name, tl) + } + loads.(*rpcLoadData).add(d) +} + +// Data for server loads (from trailers or oob). Fields in this struct must be +// updated consistently. +// +// The current solution is to hold a lock, which could cause contention. To fix, +// shard serverLoads map in rpcCountData. 
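stats() drains every counter with atomic.SwapUint64, so each snapshot covers only the interval since the previous report, while in-progress counts are read without being cleared. The read-and-clear idiom in isolation, as a small stdlib-only sketch:

package sketch

import "sync/atomic"

// counter shows the read-and-clear style used by perClusterStore: writers
// only ever add atomically, and the reader drains the value with a swap so
// consecutive snapshots cover disjoint intervals.
type counter struct {
	n atomic.Uint64
}

func (c *counter) inc()             { c.n.Add(1) }
func (c *counter) snapshot() uint64 { return c.n.Swap(0) } // read and reset
func (c *counter) inFlight() uint64 { return c.n.Load() }  // read, never reset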
+type rpcLoadData struct { + mu sync.Mutex + sum float64 + count uint64 +} + +func newRPCLoadData() *rpcLoadData { + return &rpcLoadData{} +} + +func (rld *rpcLoadData) add(v float64) { + rld.mu.Lock() + rld.sum += v + rld.count++ + rld.mu.Unlock() +} + +func (rld *rpcLoadData) loadAndClear() (s float64, c uint64) { + rld.mu.Lock() + s = rld.sum + rld.sum = 0 + c = rld.count + rld.count = 0 + rld.mu.Unlock() + return +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go new file mode 100644 index 000000000..2269cb293 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, clientPrefix(p)) +} + +func clientPrefix(p *clientImpl) string { + return fmt.Sprintf("[xds-client %p] ", p) +} + +func authorityPrefix(p *clientImpl, serverURI string) string { + return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go new file mode 100644 index 000000000..bd08c7e18 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go @@ -0,0 +1,107 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync" + "sync/atomic" +) + +type clusterNameAndServiceName struct { + clusterName, edsServiceName string +} + +type clusterRequestsCounter struct { + mu sync.Mutex + clusters map[clusterNameAndServiceName]*ClusterRequestsCounter +} + +var src = &clusterRequestsCounter{ + clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter), +} + +// ClusterRequestsCounter is used to track the total inflight requests for a +// service with the provided name. +type ClusterRequestsCounter struct { + ClusterName string + EDSServiceName string + numRequests uint32 +} + +// GetClusterRequestsCounter returns the ClusterRequestsCounter with the +// provided serviceName. 
If one does not exist, it creates it. +func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterRequestsCounter { + src.mu.Lock() + defer src.mu.Unlock() + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServiceName: edsServiceName, + } + c, ok := src.clusters[k] + if !ok { + c = &ClusterRequestsCounter{ClusterName: clusterName} + src.clusters[k] = c + } + return c +} + +// StartRequest starts a request for a cluster, incrementing its number of +// requests by 1. Returns an error if the max number of requests is exceeded. +func (c *ClusterRequestsCounter) StartRequest(max uint32) error { + // Note that during race, the limits could be exceeded. This is allowed: + // "Since the implementation is eventually consistent, races between threads + // may allow limits to be potentially exceeded." + // https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break. + if atomic.LoadUint32(&c.numRequests) >= max { + return fmt.Errorf("max requests %v exceeded on service %v", max, c.ClusterName) + } + atomic.AddUint32(&c.numRequests, 1) + return nil +} + +// EndRequest ends a request for a service, decrementing its number of requests +// by 1. +func (c *ClusterRequestsCounter) EndRequest() { + atomic.AddUint32(&c.numRequests, ^uint32(0)) +} + +// ClearCounterForTesting clears the counter for the service. Should be only +// used in tests. +func ClearCounterForTesting(clusterName, edsServiceName string) { + src.mu.Lock() + defer src.mu.Unlock() + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServiceName: edsServiceName, + } + c, ok := src.clusters[k] + if !ok { + return + } + c.numRequests = 0 +} + +// ClearAllCountersForTesting clears all the counters. Should be only used in +// tests. +func ClearAllCountersForTesting() { + src.mu.Lock() + defer src.mu.Unlock() + src.clusters = make(map[clusterNameAndServiceName]*ClusterRequestsCounter) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go new file mode 100644 index 000000000..9acc33cbb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the transport package. +package internal + +// The following vars can be overridden by tests. +var ( + // GRPCNewClient creates a new gRPC Client. 
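StartRequest and EndRequest above are meant to bracket each RPC: the request is refused up front once the configured maximum is reached, and the in-flight count is decremented when the RPC finishes. A hypothetical wrapper showing that pairing (again, xdsclient is internal to the grpc module, so this is illustrative only):

package sketch

import "google.golang.org/grpc/xds/internal/xdsclient"

// pickWithCircuitBreaking is a hypothetical wrapper showing the intended
// pairing: the in-flight count is held for the lifetime of the RPC, and the
// RPC is refused up front once maxRequests is reached.
func pickWithCircuitBreaking(cluster, edsService string, maxRequests uint32, do func() error) error {
	c := xdsclient.GetClusterRequestsCounter(cluster, edsService)
	if err := c.StartRequest(maxRequests); err != nil {
		return err // over the limit: fail fast without issuing the RPC
	}
	defer c.EndRequest()
	return do()
}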
+ GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go new file mode 100644 index 000000000..e47fdd984 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient + +// ReportLoad starts reporting loads to the management server the transport is +// configured to use. +// +// It returns a Store for the user to report loads and a function to cancel the +// load reporting. +func (t *Transport) ReportLoad() (*load.Store, func()) { + t.lrsStartStream() + return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) +} + +// lrsStartStream starts an LRS stream to the server, if none exists. +func (t *Transport) lrsStartStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount++ + if t.lrsRefCount != 1 { + // Return early if the stream has already been started. + return + } + + ctx, cancel := context.WithCancel(context.Background()) + t.lrsCancelStream = cancel + + // Create a new done channel everytime a new stream is created. This ensures + // that we don't close the same channel multiple times (from lrsRunner() + // goroutine) when multiple streams are created and closed. + t.lrsRunnerDoneCh = make(chan struct{}) + go t.lrsRunner(ctx) +} + +// lrsStopStream closes the LRS stream, if this is the last user of the stream. +func (t *Transport) lrsStopStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount-- + if t.lrsRefCount != 0 { + // Return early if the stream has other references. + return + } + + t.lrsCancelStream() + t.logger.Infof("Stopping LRS stream") + + // Wait for the runner goroutine to exit. The done channel will be + // recreated when a new stream is created. + <-t.lrsRunnerDoneCh +} + +// lrsRunner starts an LRS stream to report load data to the management server. +// It reports load at constant intervals (as configured by the management +// server) until the context is cancelled. 
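lrsStartStream and lrsStopStream above share a single LRS stream among all callers: the first reference starts the runner goroutine, and the last release cancels it and waits on a per-run done channel. The same start-on-first-ref, stop-and-wait-on-last-unref idiom as a standalone sketch with hypothetical names:

package sketch

import (
	"context"
	"sync"
)

// sharedRunner starts a background goroutine for the first user and tears it
// down (cancel, then wait) when the last user leaves, mirroring the
// lrsStartStream/lrsStopStream pairing above.
type sharedRunner struct {
	mu     sync.Mutex
	refs   int
	cancel context.CancelFunc
	doneCh chan struct{}
	run    func(ctx context.Context) // the long-running work
}

func (s *sharedRunner) start() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.refs++; s.refs != 1 {
		return // already running
	}
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	s.doneCh = make(chan struct{}) // fresh channel per run, as in lrsStartStream
	go func() {
		defer close(s.doneCh)
		s.run(ctx)
	}()
}

func (s *sharedRunner) stop() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.refs--; s.refs != 0 {
		return // other users remain
	}
	s.cancel()
	<-s.doneCh // wait for the goroutine to exit
}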
+func (t *Transport) lrsRunner(ctx context.Context) { + defer close(t.lrsRunnerDoneCh) + + // This feature indicates that the client supports the + // LoadStatsResponse.send_all_clusters field in the LRS response. + node := proto.Clone(t.nodeProto).(*v3corepb.Node) + node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") + + runLoadReportStream := func() error { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) + if err != nil { + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) + return nil + } + t.logger.Infof("Created LRS stream to server %q", t.serverURI) + + if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { + t.logger.Warningf("Sending first LRS request failed: %v", err) + return nil + } + + clusters, interval, err := t.recvFirstLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("Reading from LRS stream failed: %v", err) + return nil + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + t.sendLoads(streamCtx, stream, clusters, interval) + return backoff.ErrResetBackoff + } + backoff.RunF(ctx, runLoadReportStream, t.backoff) +} + +func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { + tick := time.NewTicker(interval) + defer tick.Stop() + for { + select { + case <-tick.C: + case <-ctx.Done(): + return + } + if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { + t.logger.Warningf("Writing to LRS stream failed: %v", err) + return + } + } +} + +func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { + req := &v3lrspb.LoadStatsRequest{Node: node} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) { + resp, err := stream.Recv() + if err != nil { + return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + } + + rInterval := resp.GetLoadReportingInterval() + if rInterval.CheckValid() != nil { + return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err) + } + interval := rInterval.AsDuration() + + if resp.ReportEndpointGranularity { + // TODO(easwars): Support per endpoint loads. + return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") + } + + clusters := resp.Clusters + if resp.SendAllClusters { + // Return nil to send stats for all clusters. 
+ clusters = nil + } + + return clusters, interval, nil +} + +func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, + LoadMetricStats: loadMetricStats, + UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. + }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: durationpb.New(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go new file mode 100644 index 000000000..0bc0d3868 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -0,0 +1,704 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport implements the xDS transport protocol functionality +// required by the xdsclient. 
+package transport + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/keepalive" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +func init() { + transportinternal.GRPCNewClient = grpc.NewClient + xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2. +const perRPCVerbosityLevel = 9 + +// Transport provides a resource-type agnostic implementation of the xDS +// transport protocol. At this layer, resource contents are supposed to be +// opaque blobs which should be meaningful only to the xDS data model layer +// which is implemented by the `xdsresource` package. +// +// Under the hood, it owns the gRPC connection to a single management server and +// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport +// protocol version. +type Transport struct { + // These fields are initialized at creation time and are read-only afterwards. + cc *grpc.ClientConn // ClientConn to the management server. + serverURI string // URI of the management server. + onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. + onErrorHandler func(error) // To report underlying stream errors. + onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + + // These channels enable synchronization amongst the different goroutines + // spawned by the transport, and between asynchronous events resulting from + // receipt of responses from the management server. + adsStreamCh chan adsStream // New ADS streams are pushed here. + adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. + + // mu guards the following runtime state maintained by the transport. 
+ mu sync.Mutex + // resources is map from resource type URL to the set of resource names + // being requested for that type. When the ADS stream is restarted, the + // transport requests all these resources again from the management server. + resources map[string]map[string]bool + // versions is a map from resource type URL to the most recently ACKed + // version for that resource. Resource versions are a property of the + // resource type and not the stream, and will not be reset upon stream + // restarts. + versions map[string]string + // nonces is a map from resource type URL to the most recently received + // nonce for that resource type. Nonces are a property of the ADS stream and + // will be reset upon stream restarts. + nonces map[string]string + + lrsMu sync.Mutex // Protects all LRS state. + lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. + lrsRefCount int // Reference count on the load store. +} + +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which +// determines if the configuration received from the management server can be +// applied locally or not. +// +// A nil error is returned from this function when the data model layer believes +// that the received configuration is good and can be applied locally. This will +// cause the transport layer to send an ACK to the management server. A non-nil +// error is returned from this function when the data model layer believes +// otherwise, and this will cause the transport layer to send a NACK. +// +// The implementation is expected to invoke onDone when local processing of the +// update is complete, i.e. it is consumed by all watchers. +type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. +type OnSendHandlerFunc func(update *ResourceSendInfo) + +// ResourceUpdate is a representation of the configuration update received from +// the management server. It only contains fields which are useful to the data +// model layer, and layers above it. +type ResourceUpdate struct { + // Resources is the list of resources received from the management server. + Resources []*anypb.Any + // URL is the resource type URL for the above resources. + URL string + // Version is the resource version, for the above resources, as specified by + // the management server. + Version string +} + +// Options specifies configuration knobs used when creating a new Transport. +type Options struct { + // ServerCfg contains all the configuration required to connect to the xDS + // management server. + ServerCfg *bootstrap.ServerConfig + // OnRecvHandler is the component which makes ACK/NACK decisions based on + // the received resources. + // + // Invoked inline and implementations must not block. + OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report + // underlying stream errors. These can be bubbled all the way up to the user + // of the xdsClient. + // + // Invoked inline and implementations must not block. + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. 
+ // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) + // Backoff controls the amount of time to backoff before recreating failed + // ADS streams. If unspecified, a default exponential backoff implementation + // is used. For more details, see: + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. + Backoff func(retries int) time.Duration + // Logger does logging with a prefix. + Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +// New creates a new Transport. +func New(opts Options) (*Transport, error) { + switch { + case opts.OnRecvHandler == nil: + return nil, errors.New("missing OnRecv callback handler when creating a new transport") + case opts.OnErrorHandler == nil: + return nil, errors.New("missing OnError callback handler when creating a new transport") + case opts.OnSendHandler == nil: + return nil, errors.New("missing OnSend callback handler when creating a new transport") + } + + // Dial the xDS management with the passed in credentials. + dopts := []grpc.DialOption{ + opts.ServerCfg.CredsDialOption(), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + // We decided to use these sane defaults in all languages, and + // kicked the can down the road as far making these configurable. + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }), + } + grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) + } + cc.Connect() + + boff := opts.Backoff + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } + ret := &Transport{ + cc: cc, + serverURI: opts.ServerCfg.ServerURI(), + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, + + adsStreamCh: make(chan adsStream, 1), + adsRequestCh: buffer.NewUnbounded(), + resources: make(map[string]map[string]bool), + versions: make(map[string]string), + nonces: make(map[string]string), + adsRunnerDoneCh: make(chan struct{}), + } + + // This context is used for sending and receiving RPC requests and + // responses. It is also used by all the goroutines spawned by this + // Transport. Therefore, cancelling this context when the transport is + // closed will essentially cancel any pending RPCs, and cause the goroutines + // to terminate. + ctx, cancel := context.WithCancel(context.Background()) + ret.adsRunnerCancel = cancel + go ret.adsRunner(ctx) + + ret.logger.Infof("Created transport to server %q", ret.serverURI) + return ret, nil +} + +// resourceRequest wraps the resource type url and the resource names requested +// by the user of this transport. +type resourceRequest struct { + resources []string + url string +} + +// SendRequest sends out an ADS request for the provided resources of the +// specified resource type. +// +// The request is sent out asynchronously. 
If no valid stream exists at the time +// of processing this request, it is queued and will be sent out once a valid +// stream exists. +// +// If a successful response is received, the update handler callback provided at +// creation time is invoked. If an error is encountered, the stream error +// handler callback provided at creation time is invoked. +func (t *Transport) SendRequest(url string, resources []string) { + t.adsRequestCh.Put(&resourceRequest{ + url: url, + resources: resources, + }) +} + +// ResourceSendInfo wraps the names and url of resources sent to the management +// server. This is used by the `authority` type to start/stop the watch timer +// associated with every resource in the update. +type ResourceSendInfo struct { + ResourceNames []string + URL string +} + +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { + req := &v3discoverypb.DiscoveryRequest{ + TypeUrl: resourceURL, + ResourceNames: resourceNames, + VersionInfo: version, + ResponseNonce: nonce, + } + if sendNodeProto { + req.Node = t.nodeProto + } + if nackErr != nil { + req.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), Message: nackErr.Error(), + } + } + if err := stream.Send(req); err != nil { + return err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) + } else { + if t.logger.V(2) { + t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } + } + t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) + return nil +} + +func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { + resp, err := stream.Recv() + if err != nil { + return nil, "", "", "", err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else if t.logger.V(2) { + t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } + return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil +} + +// adsRunner starts an ADS stream (and backs off exponentially, if the previous +// stream failed without receiving a single reply) and runs the sender and +// receiver routines to send and receive data from the stream respectively. +func (t *Transport) adsRunner(ctx context.Context) { + defer close(t.adsRunnerDoneCh) + + go t.send(ctx) + + // We reset backoff state when we successfully receive at least one + // message from the server. + runStreamWithBackoff := func() error { + newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) + stream, err := newStream(ctx, t.cc) + if err != nil { + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) + return nil + } + t.logger.Infof("ADS stream created") + + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + msgReceived := t.recv(ctx, stream) + if msgReceived { + return backoff.ErrResetBackoff + } + return nil + } + backoff.RunF(ctx, runStreamWithBackoff, t.backoff) +} + +// send is a separate goroutine for sending resource requests on the ADS stream. 
+// +// For every new stream received on the stream channel, all existing resources +// are re-requested from the management server. +// +// For every new resource request received on the resources channel, the +// resources map is updated (this ensures that resend will pick them up when +// there are new streams) and the appropriate request is sent out. +func (t *Transport) send(ctx context.Context) { + var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sentNodeProto := false + for { + select { + case <-ctx.Done(): + return + case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. + var err error + if sentNodeProto, err = t.sendExisting(stream); err != nil { + // Send failed, clear the current stream. Attempt to resend will + // only be made after a new stream is created. + stream = nil + continue + } + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. + return + } + t.adsRequestCh.Load() + + var ( + resources []string + url, version, nonce string + send bool + nackErr error + ) + switch update := u.(type) { + case *resourceRequest: + resources, url, version, nonce = t.processResourceRequest(update) + case *ackRequest: + resources, url, version, nonce, send = t.processAckRequest(update, stream) + if !send { + continue + } + nackErr = update.nackErr + } + if stream == nil { + // There's no stream yet. Skip the request. This request + // will be resent to the new streams. If no stream is + // created, the watcher will timeout (same as server not + // sending response back). + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) + // Send failed, clear the current stream. + stream = nil + } + sentNodeProto = true + } + } +} + +// sendExisting sends out xDS requests for existing resources when recovering +// from a broken stream. +// +// We call stream.Send() here with the lock being held. It should be OK to do +// that here because the stream has just started and Send() usually returns +// quickly (once it pushes the message onto the transport layer) and is only +// ever blocked if we don't have enough flow control quota. +// +// Returns true if the node proto was sent. +func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err error) { + t.mu.Lock() + defer t.mu.Unlock() + + // Reset only the nonces map when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream + t.nonces = make(map[string]string) + + // Send node proto only in the first request on the stream. 
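To make the re-sync behaviour of sendExisting concrete: on a restarted stream, the first request for a resource type carries the last accepted version, an empty nonce, and the node proto. The hedged sketch below reuses the DiscoveryRequest fields set by sendAggregatedDiscoveryServiceRequest; the type URL, resource names, and version are made up for illustration.

package example

import (
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)

// firstRequestOnNewStream shows the shape of the re-sync request sent after a
// stream restart: last accepted version, empty nonce, and the node proto
// attached because it is the first request on the new stream.
func firstRequestOnNewStream(node *v3corepb.Node) *v3discoverypb.DiscoveryRequest {
	return &v3discoverypb.DiscoveryRequest{
		Node:          node,
		TypeUrl:       "type.googleapis.com/envoy.config.cluster.v3.Cluster",
		ResourceNames: []string{"cluster-a", "cluster-b"},
		VersionInfo:   "3", // versions survive the stream restart
		ResponseNonce: "",  // nonces are per-stream and start out empty
	}
}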
+ for url, resources := range t.resources { + if len(resources) == 0 { + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) + return false, err + } + sentNodeProto = true + } + + return sentNodeProto, nil +} + +// recv receives xDS responses on the provided ADS stream and branches out to +// message specific handlers. Returns true if at least one message was +// successfully received. +func (t *Transport) recv(ctx context.Context, stream adsStream) bool { + // Initialize the flow control quota for the stream. This helps to block the + // next read until the previous one is consumed by all watchers. + fc := newADSFlowControl() + + msgReceived := false + for { + // Wait for ADS stream level flow control to be available. + if !fc.wait(ctx) { + if t.logger.V(2) { + t.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) + if err != nil { + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). + if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) + t.logger.Warningf("ADS stream closed: %v", err) + return msgReceived + } + msgReceived = true + + u := ResourceUpdate{ + Resources: resources, + URL: url, + Version: rVersion, + } + fc.setPending() + if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + t.logger.Warningf("%v", err) + continue + } + // If the data model layer returned an error, we need to NACK the + // response in which case we need to set the version to the most + // recently accepted version of this resource type. + if err != nil { + t.mu.Lock() + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: t.versions[url], + nackErr: err, + }) + t.mu.Unlock() + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) + continue + } + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: rVersion, + }) + if t.logger.V(2) { + t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } + } +} + +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) + for i := range m { + ret = append(ret, i) + } + return ret +} + +func sliceToMap(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + return ret +} + +// processResourceRequest pulls the fields needed to send out an ADS request. +// The resource type and the list of resources to request are provided by the +// user, while the version and nonce are maintained internally. +// +// The resources map, which keeps track of the resources being requested, is +// updated here. 
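The ACK/NACK handling in recv boils down to: an ACK carries the version just received, a NACK carries the most recently accepted version plus the validation error, and both carry the latest nonce. A small illustrative sketch of that decision, using a simplified stand-in for the ackRequest type rather than the real one:

package example

// ack captures the fields that differ between an ACK and a NACK; it is an
// illustrative stand-in for ackRequest, not the real type.
type ack struct {
	version string // received version (ACK) or last accepted version (NACK)
	nonce   string // always the nonce from the response just processed
	nackErr error  // nil for ACK, the validation error for NACK
}

// buildAck mirrors the decision in recv: accept the new version on success,
// fall back to the previously accepted version on validation failure.
func buildAck(receivedVersion, acceptedVersion, nonce string, validationErr error) ack {
	if validationErr != nil {
		return ack{version: acceptedVersion, nonce: nonce, nackErr: validationErr}
	}
	return ack{version: receivedVersion, nonce: nonce}
}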
Any subsequent stream failure will re-request resources stored +// in this map. +// +// Returns the list of resources, resource type url, version and nonce. +func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { + t.mu.Lock() + defer t.mu.Unlock() + + resources := sliceToMap(req.resources) + t.resources[req.url] = resources + return req.resources, req.url, t.versions[req.url], t.nonces[req.url] +} + +type ackRequest struct { + url string // Resource type URL. + version string // NACK if version is an empty string. + nonce string + nackErr error // nil for ACK, non-nil for NACK. + // ACK/NACK are tagged with the stream it's for. When the stream is down, + // all the ACK/NACK for this stream will be dropped, and the version/nonce + // won't be updated. + stream grpc.ClientStream +} + +// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces +// and versions map is updated. +// +// Returns the list of resources, resource type url, version, nonce, and an +// indication of whether an ACK should be sent on the wire or not. +func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { + if ack.stream != stream { + // If ACK's stream isn't the current sending stream, this means the ACK + // was pushed to queue before the old stream broke, and a new stream has + // been started since. Return immediately here so we don't update the + // nonce for the new stream. + return nil, "", "", "", false + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Update the nonce irrespective of whether we send the ACK request on wire. + // An up-to-date nonce is required for the next request. + nonce := ack.nonce + t.nonces[ack.url] = nonce + + s, ok := t.resources[ack.url] + if !ok || len(s) == 0 { + // We don't send the ACK request if there are no resources of this type + // in our resources map. This can be either when the server sends + // responses before any request, or the resources are removed while the + // ackRequest was in queue). If we send a request with an empty + // resource name list, the server may treat it as a wild card and send + // us everything. + return nil, "", "", "", false + } + resources := mapToSlice(s) + + // Update the versions map only when we plan to send an ACK. + if ack.nackErr == nil { + t.versions[ack.url] = ack.version + } + + return resources, ack.url, ack.version, nonce, true +} + +// Close closes the Transport and frees any associated resources. +func (t *Transport) Close() { + t.adsRunnerCancel() + <-t.adsRunnerDoneCh + t.adsRequestCh.Close() + t.cc.Close() +} + +// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC +// channel to the management server. +// +// Only for testing purposes. +func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { + return t.cc.GetState() +} + +// adsFlowControl implements ADS stream level flow control that enables the +// transport to block the reading of the next message off of the stream until +// the previous update is consumed by all watchers. +// +// The lifetime of the flow control is tied to the lifetime of the stream. +type adsFlowControl struct { + logger *grpclog.PrefixLogger + + // Whether the most recent update is pending consumption by all watchers. + pending atomic.Bool + // Channel used to notify when all the watchers have consumed the most + // recent update. Wait() blocks on reading a value from this channel. 
+ readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl() *adsFlowControl { + return &adsFlowControl{readyCh: make(chan struct{}, 1)} +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. +func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. + case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go new file mode 100644 index 000000000..3c48f1bde --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package converter provides converters to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. These converters are registered by proto type in a registry, +// which gets pulled from based off proto type passed in. 
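The registry mentioned in this package comment is a plain map from type URL to conversion function. Below is a minimal sketch of that pattern with a trimmed-down converter signature; the real signature is the Converter type defined in the xdslbregistry package later in this diff.

package example

import "encoding/json"

// convertFn is a simplified stand-in for xdslbregistry.Converter.
type convertFn func(rawProto []byte) (json.RawMessage, error)

var registry = map[string]convertFn{}

// register is called from init functions, one entry per supported type URL.
func register(typeURL string, fn convertFn) { registry[typeURL] = fn }

// convert looks the policy up by type URL; a missing entry means the policy is
// unsupported and the caller moves on to the next one in the list.
func convert(typeURL string, rawProto []byte) (json.RawMessage, error) {
	fn, ok := registry[typeURL]
	if !ok {
		return nil, nil
	}
	return fn(rawProto)
}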
+package converter + +import ( + "encoding/json" + "fmt" + "strings" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/leastrequest" + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" +) + +func init() { + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest", convertLeastRequestProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + defaultLeastRequestChoiceCount = 2 +) + +func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(rawProto, rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if rhProto.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", rhProto.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhProto.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhProto.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhCfg := &ringhash.LBConfig{ + MinRingSize: minSize, + MaxRingSize: maxSize, + } + + rhCfgJSON, err := 
json.Marshal(rhCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", rhCfg, err) + } + return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil +} + +type pfConfig struct { + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + pfProto := &v3pickfirstpb.PickFirst{} + if err := proto.Unmarshal(rawProto, pfProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + pfCfg := &pfConfig{ShuffleAddressList: pfProto.GetShuffleAddressList()} + js, err := json.Marshal(pfCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) + } + return makeBalancerConfigJSON(pickfirst.Name, js), nil +} + +func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { + return makeBalancerConfigJSON(roundrobin.Name, json.RawMessage("{}")), nil +} + +type wrrLocalityLBConfig struct { + ChildPolicy json.RawMessage `json:"childPolicy,omitempty"` +} + +func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + epJSON, err := xdslbregistry.ConvertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) + } + wrrLCfg := wrrLocalityLBConfig{ + ChildPolicy: epJSON, + } + + lbCfgJSON, err := json.Marshal(wrrLCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLCfg, err) + } + return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil +} + +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} + if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + wrrLBCfg := &wrrLBConfig{} + // Only set fields if specified in proto. If not set, ParseConfig of the WRR + // will populate the config with defaults. 
+	if enableOOBLoadReportCfg := cswrrProto.GetEnableOobLoadReport(); enableOOBLoadReportCfg != nil {
+		wrrLBCfg.EnableOOBLoadReport = enableOOBLoadReportCfg.GetValue()
+	}
+	if oobReportingPeriodCfg := cswrrProto.GetOobReportingPeriod(); oobReportingPeriodCfg != nil {
+		wrrLBCfg.OOBReportingPeriod = internalserviceconfig.Duration(oobReportingPeriodCfg.AsDuration())
+	}
+	if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil {
+		wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration())
+	}
+	if weightExpirationPeriodCfg := cswrrProto.GetWeightExpirationPeriod(); weightExpirationPeriodCfg != nil {
+		wrrLBCfg.WeightExpirationPeriod = internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration())
+	}
+	if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil {
+		wrrLBCfg.WeightUpdatePeriod = internalserviceconfig.Duration(weightUpdatePeriodCfg.AsDuration())
+	}
+	if errorUtilizationPenaltyCfg := cswrrProto.GetErrorUtilizationPenalty(); errorUtilizationPenaltyCfg != nil {
+		wrrLBCfg.ErrorUtilizationPenalty = float64(errorUtilizationPenaltyCfg.GetValue())
+	}
+
+	lbCfgJSON, err := json.Marshal(wrrLBCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLBCfg, err)
+	}
+	return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil
+}
+
+func convertLeastRequestProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	if !envconfig.LeastRequestLB {
+		return nil, nil
+	}
+	lrProto := &v3leastrequestpb.LeastRequest{}
+	if err := proto.Unmarshal(rawProto, lrProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	// "The configuration for the Least Request LB policy is the
+	// least_request_lb_config field. The field is optional; if not present,
+	// defaults will be assumed for all of its values." - A48
+	choiceCount := uint32(defaultLeastRequestChoiceCount)
+	if cc := lrProto.GetChoiceCount(); cc != nil {
+		choiceCount = cc.GetValue()
+	}
+	lrCfg := &leastrequest.LBConfig{ChoiceCount: choiceCount}
+	js, err := json.Marshal(lrCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling JSON for type %T: %v", lrCfg, err)
+	}
+	return makeBalancerConfigJSON(leastrequest.Name, js), nil
+}
+
+func convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v1xdsudpatypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue())
+}
+
+func convertV3TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v3xdsxdstypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue())
+}
+
+// convertCustomPolicy attempts to prepare json configuration for a custom lb
+// proto, which specifies the gRPC balancer type and configuration. Returns the
+// converted json and an error which should cause caller to error if error
+// converting. If both json and error returned are nil, it means the gRPC
+// Balancer registry does not contain that balancer type, and the caller should
+// continue to the next policy.
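As an example of the custom-policy path documented above, assuming a hypothetical balancer registered under the name myorg.MyCustomPolicy, a TypedStruct with type_url "type.googleapis.com/myorg.MyCustomPolicy" and value {"threshold": 10} would convert to [{"myorg.MyCustomPolicy": {"threshold": 10}}]. A sketch of the name extraction and wrapping performed by convertCustomPolicy and makeBalancerConfigJSON, both defined just below:

package example

import (
	"encoding/json"
	"fmt"
	"strings"
)

// wrapCustomPolicy mimics the conversion: take the type name after the last
// '/', and wrap the struct's JSON as a single-entry balancer config list.
func wrapCustomPolicy(typeURL string, structJSON json.RawMessage) json.RawMessage {
	name := typeURL[strings.LastIndex(typeURL, "/")+1:]
	return json.RawMessage(fmt.Sprintf(`[{%q: %s}]`, name, structJSON))
}

// Example: wrapCustomPolicy("type.googleapis.com/myorg.MyCustomPolicy",
// json.RawMessage(`{"threshold": 10}`)) yields
// `[{"myorg.MyCustomPolicy": {"threshold": 10}}]`.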
+func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + pos := strings.LastIndex(typeURL, "/") + name := typeURL[pos+1:] + + if balancer.Get(name) == nil { + return nil, nil + } + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. + return makeBalancerConfigJSON(name, rawJSON), nil +} + +type wrrLBConfig struct { + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod internalserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod internalserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod internalserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod internalserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go new file mode 100644 index 000000000..a7782709d --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides a registry of converters that convert proto +// from load balancing configuration, defined by the xDS API spec, to JSON load +// balancing configuration. +package xdslbregistry + +import ( + "encoding/json" + "fmt" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" +) + +var ( + // m is a map from proto type to Converter. + m = make(map[string]Converter) +) + +// Register registers the converter to the map keyed on a proto type. Must be +// called at init time. Not thread safe. +func Register(protoType string, c Converter) { + m[protoType] = c +} + +// SetRegistry sets the xDS LB registry. Must be called at init time. Not thread +// safe. +func SetRegistry(registry map[string]Converter) { + m = registry +} + +// Converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. 
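The (nil, nil) return contract described above is what lets a converter opt out without failing the whole conversion; ConvertToServiceConfig simply moves on to the next policy in the list. A hedged sketch of a converter gated on a hypothetical feature flag, following the same pattern as the env-var-gated least_request converter:

package example

import "encoding/json"

var featureEnabled = false // hypothetical gate, analogous to envconfig.LeastRequestLB

// convertGatedPolicy follows the Converter contract: returning (nil, nil)
// tells the caller to skip this policy and try the next one in the list.
func convertGatedPolicy(rawProto []byte, depth int) (json.RawMessage, error) {
	if !featureEnabled {
		return nil, nil
	}
	// ... unmarshal rawProto and build the JSON config here ...
	return json.RawMessage(`[{"example_policy": {}}]`), nil
}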
+type Converter func([]byte, int) (json.RawMessage, error) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." + // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go new file mode 100644 index 000000000..18d47cbc1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ClusterResourceTypeName represents the transport agnostic name for the + // cluster resource. + ClusterResourceTypeName = "ClusterResource" +) + +var ( + // Compile time interface checks. + _ Type = clusterResourceType{} + + // Singleton instantiation of the resource type implementation. + clusterType = clusterResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ClusterURL, + typeName: ClusterResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// clusterResourceType provides the resource-type specific functionality for a +// Cluster resource. +// +// Implements the Type interface. 
+type clusterResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, cluster, err := unmarshalClusterResource(resource, opts.ServerConfig) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + // Perform extra validation here. + if err := securityConfigValidator(opts.BootstrapConfig, cluster.SecurityCfg); err != nil { + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: cluster}}, nil + +} + +// ClusterResourceData wraps the configuration of a Cluster resource as received +// from the management server. +// +// Implements the ResourceData interface. +type ClusterResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ClusterUpdate +} + +// Equal returns true if other is equal to r. +func (c *ClusterResourceData) Equal(other ResourceData) bool { + if c == nil && other == nil { + return true + } + if (c == nil) != (other == nil) { + return false + } + return proto.Equal(c.Resource.Raw, other.Raw()) +} + +// ToJSON returns a JSON string representation of the resource data. +func (c *ClusterResourceData) ToJSON() string { + return pretty.ToJSON(c.Resource) +} + +// Raw returns the underlying raw protobuf form of the cluster resource. +func (c *ClusterResourceData) Raw() *anypb.Any { + return c.Resource.Raw +} + +// ClusterWatcher wraps the callbacks to be invoked for different events +// corresponding to the cluster resource being watched. +type ClusterWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ClusterResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingClusterWatcher struct { + watcher ClusterWatcher +} + +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + c := data.(*ClusterResourceData) + d.watcher.OnUpdate(c, onDone) +} + +func (d *delegatingClusterWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingClusterWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchCluster uses xDS to discover the configuration associated with the +// provided cluster resource name. 
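For orientation, the watcher interface above is typically satisfied by a small struct and passed to WatchCluster. The sketch below is illustrative only: xdsresource is an internal package, so real callers live inside the grpc module, and it assumes OnDoneFunc is a plain func() that must be invoked once the update has been consumed, as the flow-control discussion earlier in this diff suggests.

package example

import (
	"log"

	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

type loggingClusterWatcher struct{}

func (loggingClusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, done xdsresource.OnDoneFunc) {
	log.Printf("cluster update: %s", u.ToJSON())
	done() // release ADS flow control once the update is consumed
}

func (loggingClusterWatcher) OnError(err error, done xdsresource.OnDoneFunc) {
	log.Printf("cluster watch error: %v", err)
	done()
}

func (loggingClusterWatcher) OnResourceDoesNotExist(done xdsresource.OnDoneFunc) {
	log.Print("cluster resource does not exist")
	done()
}

// watch starts the watch and returns the cancel func; p is any
// xdsresource.Producer, in practice the xDS client.
func watch(p xdsresource.Producer, name string) func() {
	return xdsresource.WatchCluster(p, name, loggingClusterWatcher{})
}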
+func WatchCluster(p Producer, name string, w ClusterWatcher) (cancel func()) { + delegator := &delegatingClusterWatcher{watcher: w} + return p.WatchResource(clusterType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go new file mode 100644 index 000000000..66c0ae0b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // EndpointsResourceTypeName represents the transport agnostic name for the + // endpoint resource. + EndpointsResourceTypeName = "EndpointsResource" +) + +var ( + // Compile time interface checks. + _ Type = endpointsResourceType{} + + // Singleton instantiation of the resource type implementation. + endpointsType = endpointsResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3EndpointsURL, + typeName: "EndpointsResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// endpointsResourceType provides the resource-type specific functionality for a +// ClusterLoadAssignment (or Endpoints) resource. +// +// Implements the Type interface. +type endpointsResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (endpointsResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalEndpointsResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: EndpointsUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: rc}}, nil + +} + +// EndpointsResourceData wraps the configuration of an Endpoints resource as +// received from the management server. +// +// Implements the ResourceData interface. +type EndpointsResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource EndpointsUpdate +} + +// Equal returns true if other is equal to r. 
+func (e *EndpointsResourceData) Equal(other ResourceData) bool { + if e == nil && other == nil { + return true + } + if (e == nil) != (other == nil) { + return false + } + return proto.Equal(e.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (e *EndpointsResourceData) ToJSON() string { + return pretty.ToJSON(e.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (e *EndpointsResourceData) Raw() *anypb.Any { + return e.Resource.Raw +} + +// EndpointsWatcher wraps the callbacks to be invoked for different +// events corresponding to the endpoints resource being watched. +type EndpointsWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*EndpointsResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingEndpointsWatcher struct { + watcher EndpointsWatcher +} + +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + e := data.(*EndpointsResourceData) + d.watcher.OnUpdate(e, onDone) +} + +func (d *delegatingEndpointsWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchEndpoints uses xDS to discover the configuration associated with the +// provided endpoints resource name. +func WatchEndpoints(p Producer, name string, w EndpointsWatcher) (cancel func()) { + delegator := &delegatingEndpointsWatcher{watcher: w} + return p.WatchResource(endpointsType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go new file mode 100644 index 000000000..7bac4469b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsresource + +import "fmt" + +// ErrorType is the type of the error that the watcher will receive from the xds +// client. +type ErrorType int + +const ( + // ErrorTypeUnknown indicates the error doesn't have a specific type. It is + // the default value, and is returned if the error is not an xds error. + ErrorTypeUnknown ErrorType = iota + // ErrorTypeConnection indicates a connection error from the gRPC client. 
+ ErrorTypeConnection + // ErrorTypeResourceNotFound indicates a resource is not found from the xds + // response. It's typically returned if the resource is removed in the xds + // server. + ErrorTypeResourceNotFound + // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from + // the management server with resources of an unsupported resource type. + ErrorTypeResourceTypeUnsupported + // ErrTypeStreamFailedAfterRecv indicates an ADS stream error, after + // successful receipt of at least one message from the server. + ErrTypeStreamFailedAfterRecv +) + +type xdsClientError struct { + t ErrorType + desc string +} + +func (e *xdsClientError) Error() string { + return e.desc +} + +// NewErrorf creates an xds client error. The callbacks are called with this +// error, to pass additional information about the error. +func NewErrorf(t ErrorType, format string, args ...any) error { + return &xdsClientError{t: t, desc: fmt.Sprintf(format, args...)} +} + +// ErrType returns the error's type. +func ErrType(e error) ErrorType { + if xe, ok := e.(*xdsClientError); ok { + return xe.t + } + return ErrorTypeUnknown +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go new file mode 100644 index 000000000..196bb9f87 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -0,0 +1,895 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" +) + +const ( + // Used as the map key for unspecified prefixes. The actual value of this + // key is immaterial. + unspecifiedPrefixMapKey = "unspecified" + + // An unspecified destination or source prefix should be considered a less + // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an + // unspecified prefix should match most v4 and v6 addresses compared to the + // wildcard prefixes which match only a specific network (v4 or v6). + // + // We use these constants when looking up the most specific prefix match. A + // wildcard prefix will match 0 bits, and to make sure that a wildcard + // prefix is considered a more specific match than an unspecified prefix, we + // use a value of -1 for the latter. 
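A hedged sketch of the specificity ranking those constants encode: an unspecified prefix scores -1, a wildcard 0.0.0.0/0 or ::/0 scores 0 matched bits, longer prefixes that contain the address score higher, and -2 means no match at all. This is illustrative, not the lookup code used by FilterChainManager.

package example

import "net"

const (
	noMatch          = -2 // prefix specified but does not contain the address
	unspecifiedMatch = -1 // no prefix specified at all
)

// prefixSpecificity returns how specific a match the given prefix is for addr:
// the number of ones in the mask when it matches, or one of the sentinels.
func prefixSpecificity(prefix *net.IPNet, addr net.IP) int {
	if prefix == nil {
		return unspecifiedMatch
	}
	if !prefix.Contains(addr) {
		return noMatch
	}
	ones, _ := prefix.Mask.Size()
	return ones // a wildcard 0.0.0.0/0 or ::/0 scores 0
}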
+ noPrefixMatch = -2 + unspecifiedPrefixMatch = -1 +) + +// FilterChain captures information from within a FilterChain message in a +// Listener resource. +type FilterChain struct { + // SecurityCfg contains transport socket security configuration. + SecurityCfg *SecurityConfig + // HTTPFilters represent the HTTP Filters that comprise this FilterChain. + HTTPFilters []HTTPFilter + // RouteConfigName is the route configuration name for this FilterChain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned for this filter chain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + // UsableRouteConfiguration is the routing configuration for this filter + // chain (LDS + RDS). + UsableRouteConfiguration *atomic.Pointer[UsableRouteConfiguration] +} + +// VirtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. +type VirtualHostWithInterceptors struct { + // Domains are the domain names which map to this Virtual Host. On the + // server side, this will be dictated by the :authority header of the + // incoming RPC. + Domains []string + // Routes are the Routes for this Virtual Host. + Routes []RouteWithInterceptors +} + +// RouteWithInterceptors captures information in a Route, and contains +// a usable matcher and also instantiated HTTP Filters. +type RouteWithInterceptors struct { + // M is the matcher used to match to this route. + M *CompositeMatcher + // ActionType is the type of routing action to initiate once matched to. + ActionType RouteActionType + // Interceptors are interceptors instantiated for this route. These will be + // constructed from a combination of the top level configuration and any + // HTTP Filter overrides present in Virtual Host or Route. + Interceptors []resolver.ServerInterceptor +} + +// UsableRouteConfiguration contains a matchable route configuration, with +// instantiated HTTP Filters per route. +type UsableRouteConfiguration struct { + VHS []VirtualHostWithInterceptors + Err error +} + +// ConstructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. +func (fc *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) *UsableRouteConfiguration { + vhs := make([]VirtualHostWithInterceptors, 0, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := fc.convertVirtualHost(vh) + if err != nil { + // Non nil if (lds + rds) fails, shouldn't happen since validated by + // xDS Client, treat as L7 error but shouldn't happen. + return &UsableRouteConfiguration{Err: fmt.Errorf("virtual host construction: %v", err)} + } + vhs = append(vhs, vhwi) + } + return &UsableRouteConfiguration{VHS: vhs} +} + +func (fc *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { + rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + var err error + rs[i].ActionType = r.ActionType + rs[i].M, err = RouteToMatcher(r) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) + } + for _, filter := range fc.HTTPFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. 
+ override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].Interceptors = append(rs[i].Interceptors, si) + } + } + } + return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil +} + +// SourceType specifies the connection source IP match type. +type SourceType int + +const ( + // SourceTypeAny matches connection attempts from any source. + SourceTypeAny SourceType = iota + // SourceTypeSameOrLoopback matches connection attempts from the same host. + SourceTypeSameOrLoopback + // SourceTypeExternal matches connection attempts from a different host. + SourceTypeExternal +) + +// FilterChainManager contains all the match criteria specified through all +// filter chains in a single Listener resource. It also contains the default +// filter chain specified in the Listener resource. It provides two important +// pieces of functionality: +// 1. Validate the filter chains in an incoming Listener resource to make sure +// that there aren't filter chains which contain the same match criteria. +// 2. As part of performing the above validation, it builds an internal data +// structure which will if used to look up the matching filter chain at +// connection time. +// +// The logic specified in the documentation around the xDS FilterChainMatch +// proto mentions 8 criteria to match on. +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +type FilterChainManager struct { + // Destination prefix is the first match criteria that we support. + // Therefore, this multi-stage map is indexed on destination prefixes + // specified in the match criteria. + // Unspecified destination prefix matches end up as a wildcard entry here + // with a key of 0.0.0.0/0. + dstPrefixMap map[string]*destPrefixEntry + + // At connection time, we do not have the actual destination prefix to match + // on. We only have the real destination address of the incoming connection. + // This means that we cannot use the above map at connection time. This list + // contains the map entries from the above map that we can use at connection + // time to find matching destination prefixes in O(n) time. + // + // TODO: Implement LC-trie to support logarithmic time lookups. If that + // involves too much time/effort, sort this slice based on the netmask size. + dstPrefixes []*destPrefixEntry + + def *FilterChain // Default filter chain, if specified. + + // Slice of filter chains managed by this filter chain manager. + fcs []*FilterChain + + // RouteConfigNames are the route configuration names which need to be + // dynamically queried for RDS Configuration for any FilterChains which + // specify to load RDS Configuration dynamically. 
+ RouteConfigNames map[string]bool +} + +// destPrefixEntry is the value type of the map indexed on destination prefixes. +type destPrefixEntry struct { + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet + // We need to keep track of the transport protocols seen as part of the + // config validation (and internal structure building) phase. The only two + // values that we support are empty string and "raw_buffer", with the latter + // taking preference. Once we have seen one filter chain with "raw_buffer", + // we can drop everything other filter chain with an empty transport + // protocol. + rawBufferSeen bool + // For each specified source type in the filter chain match criteria, this + // array points to the set of specified source prefixes. + // Unspecified source type matches end up as a wildcard entry here with an + // index of 0, which actually represents the source type `ANY`. + srcTypeArr sourceTypesArray +} + +// An array for the fixed number of source types that we have. +type sourceTypesArray [3]*sourcePrefixes + +// sourcePrefixes contains source prefix related information specified in the +// match criteria. These are pointed to by the array of source types. +type sourcePrefixes struct { + // These are very similar to the 'dstPrefixMap' and 'dstPrefixes' field of + // FilterChainManager. Go there for more info. + srcPrefixMap map[string]*sourcePrefixEntry + srcPrefixes []*sourcePrefixEntry +} + +// sourcePrefixEntry contains match criteria per source prefix. +type sourcePrefixEntry struct { + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet + // Mapping from source ports specified in the match criteria to the actual + // filter chain. Unspecified source port matches en up as a wildcard entry + // here with a key of 0. + srcPortMap map[int]*FilterChain +} + +// NewFilterChainManager parses the received Listener resource and builds a +// FilterChainManager. Returns a non-nil error on validation failures. +// +// This function is only exported so that tests outside of this package can +// create a FilterChainManager. +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { + // Parse all the filter chains and build the internal data structures. + fci := &FilterChainManager{ + dstPrefixMap: make(map[string]*destPrefixEntry), + RouteConfigNames: make(map[string]bool), + } + if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { + return nil, err + } + // Build the source and dest prefix slices used by Lookup(). + fcSeen := false + for _, dstPrefix := range fci.dstPrefixMap { + fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) + for _, st := range dstPrefix.srcTypeArr { + if st == nil { + continue + } + for _, srcPrefix := range st.srcPrefixMap { + st.srcPrefixes = append(st.srcPrefixes, srcPrefix) + for _, fc := range srcPrefix.srcPortMap { + if fc != nil { + fcSeen = true + } + } + } + } + } + + // Retrieve the default filter chain. The match criteria specified on the + // default filter chain is never used. The default filter chain simply gets + // used when none of the other filter chains match. + var def *FilterChain + if dfc := lis.GetDefaultFilterChain(); dfc != nil { + var err error + if def, err = fci.filterChainFromProto(dfc); err != nil { + return nil, err + } + } + fci.def = def + if fci.def != nil { + fci.fcs = append(fci.fcs, fci.def) + } + + // If there are no supported filter chains and no default filter chain, we + // fail here. 
This will call the Listener resource to be NACK'ed. + if !fcSeen && fci.def == nil { + return nil, fmt.Errorf("no supported filter chains and no default filter chain") + } + return fci, nil +} + +// addFilterChains parses the filter chains in fcs and adds the required +// internal data structures corresponding to the match criteria. +func (fcm *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { + for _, fc := range fcs { + fcMatch := fc.GetFilterChainMatch() + if fcMatch.GetDestinationPort().GetValue() != 0 { + // Destination port is the first match criteria and we do not + // support filter chains which contains this match criteria. + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + continue + } + + // Build the internal representation of the filter chain match fields. + if err := fcm.addFilterChainsForDestPrefixes(fc); err != nil { + return err + } + } + + return nil +} + +func (fcm *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + + if len(dstPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. + if fcm.dstPrefixMap[unspecifiedPrefixMapKey] == nil { + fcm.dstPrefixMap[unspecifiedPrefixMapKey] = &destPrefixEntry{} + } + return fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[unspecifiedPrefixMapKey], fc) + } + for _, prefix := range dstPrefixes { + p := prefix.String() + if fcm.dstPrefixMap[p] == nil { + fcm.dstPrefixMap[p] = &destPrefixEntry{net: prefix} + } + if err := fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[p], fc); err != nil { + return err + } + } + return nil +} + +func (fcm *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + // Filter chains specifying server names in their match criteria always fail + // a match at connection time. So, these filter chains can be dropped now. + if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + return nil + } + + return fcm.addFilterChainsForTransportProtocols(dstEntry, fc) +} + +func (fcm *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + tp := fc.GetFilterChainMatch().GetTransportProtocol() + switch { + case tp != "" && tp != "raw_buffer": + // Only allow filter chains with transport protocol set to empty string + // or "raw_buffer". + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp == "" && dstEntry.rawBufferSeen: + // If we have already seen filter chains with transport protocol set to + // "raw_buffer", we can drop filter chains with transport protocol set + // to empty string, since the former takes precedence. 
+ logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp != "" && !dstEntry.rawBufferSeen: + // This is the first "raw_buffer" that we are seeing. Set the bit and + // reset the source types array which might contain entries for filter + // chains with transport protocol set to empty string. + dstEntry.rawBufferSeen = true + dstEntry.srcTypeArr = sourceTypesArray{} + } + return fcm.addFilterChainsForApplicationProtocols(dstEntry, fc) +} + +func (fcm *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + return nil + } + return fcm.addFilterChainsForSourceType(dstEntry, fc) +} + +// addFilterChainsForSourceType adds source types to the internal data +// structures and delegates control to addFilterChainsForSourcePrefixes to +// continue building the internal data structure. +func (fcm *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcType SourceType + switch st := fc.GetFilterChainMatch().GetSourceType(); st { + case v3listenerpb.FilterChainMatch_ANY: + srcType = SourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = SourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = SourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", st) + } + + st := int(srcType) + if dstEntry.srcTypeArr[st] == nil { + dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} + } + return fcm.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, fc) +} + +// addFilterChainsForSourcePrefixes adds source prefixes to the internal data +// structures and delegates control to addFilterChainsForSourcePorts to continue +// building the internal data structure. +func (fcm *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() + srcPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse source prefix range: %+v", pr) + } + srcPrefixes = append(srcPrefixes, ipnet) + } + + if len(srcPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. 
+ if srcPrefixMap[unspecifiedPrefixMapKey] == nil { + srcPrefixMap[unspecifiedPrefixMapKey] = &sourcePrefixEntry{ + srcPortMap: make(map[int]*FilterChain), + } + } + return fcm.addFilterChainsForSourcePorts(srcPrefixMap[unspecifiedPrefixMapKey], fc) + } + for _, prefix := range srcPrefixes { + p := prefix.String() + if srcPrefixMap[p] == nil { + srcPrefixMap[p] = &sourcePrefixEntry{ + net: prefix, + srcPortMap: make(map[int]*FilterChain), + } + } + if err := fcm.addFilterChainsForSourcePorts(srcPrefixMap[p], fc); err != nil { + return err + } + } + return nil +} + +// addFilterChainsForSourcePorts adds source ports to the internal data +// structures and completes the process of building the internal data structure. +// It is here that we determine if there are multiple filter chains with +// overlapping matching rules. +func (fcm *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { + ports := fcProto.GetFilterChainMatch().GetSourcePorts() + srcPorts := make([]int, 0, len(ports)) + for _, port := range ports { + srcPorts = append(srcPorts, int(port)) + } + + fc, err := fcm.filterChainFromProto(fcProto) + if err != nil { + return err + } + + if len(srcPorts) == 0 { + // Use the wildcard port '0', when source ports are unspecified. + if curFC := srcEntry.srcPortMap[0]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[0] = fc + fcm.fcs = append(fcm.fcs, fc) + return nil + } + for _, port := range srcPorts { + if curFC := srcEntry.srcPortMap[port]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[port] = fc + } + fcm.fcs = append(fcm.fcs, fc) + return nil +} + +// FilterChains returns the filter chains for this filter chain manager. +func (fcm *FilterChainManager) FilterChains() []*FilterChain { + return fcm.fcs +} + +// filterChainFromProto extracts the relevant information from the FilterChain +// proto and stores it in our internal representation. It also persists any +// RouteNames which need to be queried dynamically via RDS. +func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + filterChain, err := processNetworkFilters(fc.GetFilters()) + if err != nil { + return nil, err + } + // These route names will be dynamically queried via RDS in the wrapped + // listener, which receives the LDS response, if specified for the filter + // chain. + if filterChain.RouteConfigName != "" { + fcm.RouteConfigNames[filterChain.RouteConfigName] = true + } + // If the transport_socket field is not specified, it means that the control + // plane has not sent us any security config. This is fine and the server + // will use the fallback credentials configured as part of the + // xdsCredentials. 
+ ts := fc.GetTransportSocket() + if ts == nil { + return filterChain, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3DownstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) + } + downstreamCtx := &v3tlspb.DownstreamTlsContext{} + if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) + } + if downstreamCtx.GetRequireSni().GetValue() { + return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { + return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) + } + // The following fields from `DownstreamTlsContext` are ignore: + // - disable_stateless_session_resumption + // - session_ticket_keys + // - session_ticket_keys_sds_secret_config + // - session_timeout + if downstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") + } + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) + if err != nil { + return nil, err + } + if sc == nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + return filterChain, nil + } + sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() + if sc.RequireClientCert && sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") + } + filterChain.SecurityCfg = sc + return filterChain, nil +} + +// Validate takes a function to validate the FilterChains in this manager. +func (fcm *FilterChainManager) Validate(f func(fc *FilterChain) error) error { + for _, dst := range fcm.dstPrefixMap { + for _, srcType := range dst.srcTypeArr { + if srcType == nil { + continue + } + for _, src := range srcType.srcPrefixMap { + for _, fc := range src.srcPortMap { + if err := f(fc); err != nil { + return err + } + } + } + } + } + return f(fcm.def) +} + +func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { + rc := &UsableRouteConfiguration{} + filterChain := &FilterChain{ + UsableRouteConfiguration: &atomic.Pointer[UsableRouteConfiguration]{}, + } + filterChain.UsableRouteConfiguration.Store(rc) + seenNames := make(map[string]bool, len(filters)) + seenHCM := false + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) + } + if seenNames[name] { + return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. 
+ switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. + if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) + } + hcm := &v3httppb.HttpConnectionManager{} + if err := tc.UnmarshalTo(hcm); err != nil { + return nil, fmt.Errorf("network filters {%+v} failed unmarshalling of network filter {%+v}: %v", filters, filter, err) + } + // "Any filters after HttpConnectionManager should be ignored during + // connection processing but still be considered for validity. + // HTTPConnectionManager must have valid http_filters." - A36 + filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) + if err != nil { + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err) + } + if !seenHCM { + // Validate for RBAC in only the HCM that will be used, since this isn't a logical validation failure, + // it's simply a validation to support RBAC HTTP Filter. + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if hcm.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", hcm) + } + if len(hcm.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", hcm) + } + + // TODO: Implement terminal filter logic, as per A36. + filterChain.HTTPFilters = filters + seenHCM = true + switch hcm.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if hcm.GetRds().GetConfigSource().GetAds() == nil { + return nil, fmt.Errorf("ConfigSource is not ADS: %+v", hcm) + } + name := hcm.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", hcm) + } + filterChain.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + // "RouteConfiguration validation logic inherits all + // previous validations made for client-side usage as RDS + // does not distinguish between client-side and + // server-side." - A36 + // Can specify v3 here, as will never get to this function + // if v2. 
+ routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig()) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + filterChain.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", hcm) + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier) + } + } + default: + return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) + } + } + if !seenHCM { + return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) + } + return filterChain, nil +} + +// FilterChainLookupParams wraps parameters to be passed to Lookup. +type FilterChainLookupParams struct { + // IsUnspecified indicates whether the server is listening on a wildcard + // address, "0.0.0.0" for IPv4 and "::" for IPv6. Only when this is set to + // true, do we consider the destination prefixes specified in the filter + // chain match criteria. + IsUnspecifiedListener bool + // DestAddr is the local address of an incoming connection. + DestAddr net.IP + // SourceAddr is the remote address of an incoming connection. + SourceAddr net.IP + // SourcePort is the remote port of an incoming connection. + SourcePort int +} + +// Lookup returns the most specific matching filter chain to be used for an +// incoming connection on the server side. +// +// Returns a non-nil error if no matching filter chain could be found or +// multiple matching filter chains were found, and in both cases, the incoming +// connection must be dropped. +func (fcm *FilterChainManager) Lookup(params FilterChainLookupParams) (*FilterChain, error) { + dstPrefixes := filterByDestinationPrefixes(fcm.dstPrefixes, params.IsUnspecifiedListener, params.DestAddr) + if len(dstPrefixes) == 0 { + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params) + } + + srcType := SourceTypeExternal + if params.SourceAddr.Equal(params.DestAddr) || params.SourceAddr.IsLoopback() { + srcType = SourceTypeSameOrLoopback + } + srcPrefixes := filterBySourceType(dstPrefixes, srcType) + if len(srcPrefixes) == 0 { + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params) + } + srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.SourceAddr) + if err != nil { + return nil, err + } + if fc := filterBySourcePorts(srcPrefixEntry, params.SourcePort); fc != nil { + return fc, nil + } + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain after all match criteria for %+v", params) +} + +// filterByDestinationPrefixes is the first stage of the filter chain +// matching algorithm. It takes the complete set of configured filter chain +// matchers and returns the most specific matchers based on the destination +// prefix match criteria (the prefixes which match the most number of bits). +func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { + if !isUnspecified { + // Destination prefix matchers are considered only when the listener is + // bound to the wildcard address. 
+ return dstPrefixes + } + + var matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch := noPrefixMatch + for _, prefix := range dstPrefixes { + if prefix.net != nil && !prefix.net.Contains(dstAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) + } + matchingDstPrefixes = append(matchingDstPrefixes, prefix) + } + return matchingDstPrefixes +} + +// filterBySourceType is the second stage of the matching algorithm. It +// trims the filter chains based on the most specific source type match. +func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*sourcePrefixes { + var ( + srcPrefixes []*sourcePrefixes + bestSrcTypeMatch int + ) + for _, prefix := range dstPrefixes { + var ( + srcPrefix *sourcePrefixes + match int + ) + switch srcType { + case SourceTypeExternal: + match = int(SourceTypeExternal) + srcPrefix = prefix.srcTypeArr[match] + case SourceTypeSameOrLoopback: + match = int(SourceTypeSameOrLoopback) + srcPrefix = prefix.srcTypeArr[match] + } + if srcPrefix == nil { + match = int(SourceTypeAny) + srcPrefix = prefix.srcTypeArr[match] + } + if match < bestSrcTypeMatch { + continue + } + if match > bestSrcTypeMatch { + bestSrcTypeMatch = match + srcPrefixes = make([]*sourcePrefixes, 0) + } + if srcPrefix != nil { + // The source type array always has 3 entries, but these could be + // nil if the appropriate source type match was not specified. + srcPrefixes = append(srcPrefixes, srcPrefix) + } + } + return srcPrefixes +} + +// filterBySourcePrefixes is the third stage of the filter chain matching +// algorithm. It trims the filter chains based on the source prefix. At most one +// filter chain with the most specific match progress to the next stage. +func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { + var matchingSrcPrefixes []*sourcePrefixEntry + maxSubnetMatch := noPrefixMatch + for _, sp := range srcPrefixes { + for _, prefix := range sp.srcPrefixes { + if prefix.net != nil && !prefix.net.Contains(srcAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1) + } + matchingSrcPrefixes = append(matchingSrcPrefixes, prefix) + } + } + if len(matchingSrcPrefixes) == 0 { + // Finding no match is not an error condition. The caller will end up + // using the default filter chain if one was configured. + return nil, nil + } + // We expect at most a single matching source prefix entry at this point. 
If + // we have multiple entries here, and some of their source port matchers had + // wildcard entries, we could be left with more than one matching filter + // chain and hence would have been flagged as an invalid configuration at + // config validation time. + if len(matchingSrcPrefixes) != 1 { + return nil, errors.New("multiple matching filter chains") + } + return matchingSrcPrefixes[0], nil +} + +// filterBySourcePorts is the last stage of the filter chain matching +// algorithm. It trims the filter chains based on the source ports. +func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *FilterChain { + if spe == nil { + return nil + } + // A match could be a wildcard match (this happens when the match + // criteria does not specify source ports) or a specific port match (this + // happens when the match criteria specifies a set of ports and the source + // port of the incoming connection matches one of the specified ports). The + // latter is considered to be a more specific match. + if fc := spe.srcPortMap[srcPort]; fc != nil { + return fc + } + if fc := spe.srcPortMap[0]; fc != nil { + return fc + } + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go new file mode 100644 index 000000000..80fa5e6a2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ListenerResourceTypeName represents the transport agnostic name for the + // listener resource. + ListenerResourceTypeName = "ListenerResource" +) + +var ( + // Compile time interface checks. + _ Type = listenerResourceType{} + + // Singleton instantiation of the resource type implementation. + listenerType = listenerResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ListenerURL, + typeName: ListenerResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// listenerResourceType provides the resource-type specific functionality for a +// Listener resource. +// +// Implements the Type interface. 
+type listenerResourceType struct { + resourceTypeState +} + +func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { + if lis.InboundListenerCfg == nil || lis.InboundListenerCfg.FilterChains == nil { + return nil + } + return lis.InboundListenerCfg.FilterChains.Validate(func(fc *FilterChain) error { + if fc == nil { + return nil + } + return securityConfigValidator(bc, fc.SecurityCfg) + }) +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, listener, err := unmarshalListenerResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + // Perform extra validation here. + if err := listenerValidator(opts.BootstrapConfig, listener); err != nil { + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: listener}}, nil + +} + +// ListenerResourceData wraps the configuration of a Listener resource as +// received from the management server. +// +// Implements the ResourceData interface. +type ListenerResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ListenerUpdate +} + +// Equal returns true if other is equal to l. +func (l *ListenerResourceData) Equal(other ResourceData) bool { + if l == nil && other == nil { + return true + } + if (l == nil) != (other == nil) { + return false + } + return proto.Equal(l.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (l *ListenerResourceData) ToJSON() string { + return pretty.ToJSON(l.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (l *ListenerResourceData) Raw() *anypb.Any { + return l.Resource.Raw +} + +// ListenerWatcher wraps the callbacks to be invoked for different +// events corresponding to the listener resource being watched. +type ListenerWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. 
+ OnUpdate(*ListenerResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingListenerWatcher struct { + watcher ListenerWatcher +} + +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + l := data.(*ListenerResourceData) + d.watcher.OnUpdate(l, onDone) +} + +func (d *delegatingListenerWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingListenerWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchListener uses xDS to discover the configuration associated with the +// provided listener resource name. +func WatchListener(p Producer, name string, w ListenerWatcher) (cancel func()) { + delegator := &delegatingListenerWatcher{watcher: w} + return p.WatchResource(listenerType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go new file mode 100644 index 000000000..62bcb016b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsresource + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resource] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go new file mode 100644 index 000000000..796e9e300 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go @@ -0,0 +1,280 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math/rand" + "strings" + + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/metadata" +) + +// RouteToMatcher converts a route to a Matcher to match incoming RPC's against. +func RouteToMatcher(r *Route) (*CompositeMatcher, error) { + var pm pathMatcher + switch { + case r.Regex != nil: + pm = newPathRegexMatcher(r.Regex) + case r.Path != nil: + pm = newPathExactMatcher(*r.Path, r.CaseInsensitive) + case r.Prefix != nil: + pm = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) + default: + return nil, fmt.Errorf("illegal route: missing path_matcher") + } + + headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) + for _, h := range r.Headers { + var matcherT matcher.HeaderMatcher + invert := h.InvertMatch != nil && *h.InvertMatch + switch { + case h.ExactMatch != nil && *h.ExactMatch != "": + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch, invert) + case h.RegexMatch != nil: + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch, invert) + case h.PrefixMatch != nil && *h.PrefixMatch != "": + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch, invert) + case h.SuffixMatch != nil && *h.SuffixMatch != "": + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch, invert) + case h.RangeMatch != nil: + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) + case h.PresentMatch != nil: + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) + case h.StringMatch != nil: + matcherT = matcher.NewHeaderStringMatcher(h.Name, *h.StringMatch, invert) + default: + return nil, fmt.Errorf("illegal route: missing header_match_specifier") + } + headerMatchers = append(headerMatchers, matcherT) + } + + var fractionMatcher *fractionMatcher + if r.Fraction != nil { + fractionMatcher = newFractionMatcher(*r.Fraction) + } + return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil +} + +// CompositeMatcher is a matcher that holds onto many matchers and aggregates +// the matching results. +type CompositeMatcher struct { + pm pathMatcher + hms []matcher.HeaderMatcher + fm *fractionMatcher +} + +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *CompositeMatcher { + return &CompositeMatcher{pm: pm, hms: hms, fm: fm} +} + +// Match returns true if all matchers return true. +func (a *CompositeMatcher) Match(info iresolver.RPCInfo) bool { + if a.pm != nil && !a.pm.match(info.Method) { + return false + } + + // Call headerMatchers even if md is nil, because routes may match + // non-presence of some headers. + var md metadata.MD + if info.Context != nil { + md, _ = metadata.FromOutgoingContext(info.Context) + if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { + md = metadata.Join(md, extraMD) + // Remove all binary headers. They are hard to match with. May need + // to add back if asked by users. 
+ for k := range md { + if strings.HasSuffix(k, "-bin") { + delete(md, k) + } + } + } + } + for _, m := range a.hms { + if !m.Match(md) { + return false + } + } + + if a.fm != nil && !a.fm.match() { + return false + } + return true +} + +func (a *CompositeMatcher) String() string { + var ret string + if a.pm != nil { + ret += a.pm.String() + } + for _, m := range a.hms { + ret += m.String() + } + if a.fm != nil { + ret += a.fm.String() + } + return ret +} + +type fractionMatcher struct { + fraction int64 // real fraction is fraction/1,000,000. +} + +func newFractionMatcher(fraction uint32) *fractionMatcher { + return &fractionMatcher{fraction: int64(fraction)} +} + +// RandInt63n overwrites rand for control in tests. +var RandInt63n = rand.Int63n + +func (fm *fractionMatcher) match() bool { + t := RandInt63n(1000000) + return t <= fm.fraction +} + +func (fm *fractionMatcher) String() string { + return fmt.Sprintf("fraction:%v", fm.fraction) +} + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. +func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b +} + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} + +// FindBestMatchingVirtualHost returns the virtual host whose domains field best +// matches host +// +// The domains field support 4 different matching pattern types: +// +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better. +// * Exact match > suffix match > prefix match > universal match. +// +// - If two matches are of the same pattern type, the longer match is +// better. +// * This is to compare the length of the matching pattern, e.g. “*ABCDE” > +// “*ABC” +func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client + var ( + matchVh *VirtualHost + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, host) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. 
+ continue + } + matchVh = vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches authority. +func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { + var ( + matchVh *VirtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = &vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go new file mode 100644 index 000000000..da487e20c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +type pathMatcher interface { + match(path string) bool + String() string +} + +type pathExactMatcher struct { + // fullPath is all upper case if caseInsensitive is true. + fullPath string + caseInsensitive bool +} + +func newPathExactMatcher(p string, caseInsensitive bool) *pathExactMatcher { + ret := &pathExactMatcher{ + fullPath: p, + caseInsensitive: caseInsensitive, + } + if caseInsensitive { + ret.fullPath = strings.ToUpper(p) + } + return ret +} + +func (pem *pathExactMatcher) match(path string) bool { + if pem.caseInsensitive { + return pem.fullPath == strings.ToUpper(path) + } + return pem.fullPath == path +} + +func (pem *pathExactMatcher) String() string { + return "pathExact:" + pem.fullPath +} + +type pathPrefixMatcher struct { + // prefix is all upper case if caseInsensitive is true. 
+ prefix string + caseInsensitive bool +} + +func newPathPrefixMatcher(p string, caseInsensitive bool) *pathPrefixMatcher { + ret := &pathPrefixMatcher{ + prefix: p, + caseInsensitive: caseInsensitive, + } + if caseInsensitive { + ret.prefix = strings.ToUpper(p) + } + return ret +} + +func (ppm *pathPrefixMatcher) match(path string) bool { + if ppm.caseInsensitive { + return strings.HasPrefix(strings.ToUpper(path), ppm.prefix) + } + return strings.HasPrefix(path, ppm.prefix) +} + +func (ppm *pathPrefixMatcher) String() string { + return "pathPrefix:" + ppm.prefix +} + +type pathRegexMatcher struct { + re *regexp.Regexp +} + +func newPathRegexMatcher(re *regexp.Regexp) *pathRegexMatcher { + return &pathRegexMatcher{re: re} +} + +func (prm *pathRegexMatcher) match(path string) bool { + return grpcutil.FullMatchWithRegex(prm.re, path) +} + +func (prm *pathRegexMatcher) String() string { + return "pathRegex:" + prm.re.String() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go new file mode 100644 index 000000000..24200ea8d --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "net/url" + "sort" + "strings" +) + +// FederationScheme is the scheme of a federation resource name. +const FederationScheme = "xdstp" + +// Name contains the parsed component of an xDS resource name. +// +// An xDS resource name is in the format of +// xdstp://[{authority}]/{resource type}/{id/*}?{context parameters}{#processing directive,*} +// +// See +// https://github.com/cncf/xds/blob/main/proposals/TP1-xds-transport-next.md#uri-based-xds-resource-names +// for details, and examples. +type Name struct { + Scheme string + Authority string + Type string + ID string + + ContextParams map[string]string + + processingDirective string +} + +// ParseName splits the name and returns a struct representation of the Name. +// +// If the name isn't a valid new-style xDS name, field ID is set to the input. +// Note that this is not an error, because we still support the old-style +// resource names (those not starting with "xdstp:"). +// +// The caller can tell if the parsing is successful by checking the returned +// Scheme. +func ParseName(name string) *Name { + if !strings.Contains(name, "://") { + // Only the long form URL, with ://, is valid. + return &Name{ID: name} + } + parsed, err := url.Parse(name) + if err != nil { + return &Name{ID: name} + } + + ret := &Name{ + Scheme: parsed.Scheme, + Authority: parsed.Host, + } + split := strings.SplitN(parsed.Path, "/", 3) + if len(split) < 3 { + // Path is in the format of "/type/id". There must be at least 3 + // segments after splitting. 
+ return &Name{ID: name} + } + ret.Type = split[1] + ret.ID = split[2] + if len(parsed.Query()) != 0 { + ret.ContextParams = make(map[string]string) + for k, vs := range parsed.Query() { + if len(vs) > 0 { + // We only keep one value of each key. Behavior for multiple values + // is undefined. + ret.ContextParams[k] = vs[0] + } + } + } + // TODO: processing directive (the part comes after "#" in the URL, stored + // in parsed.RawFragment) is kept but not processed. Add support for that + // when it's needed. + ret.processingDirective = parsed.RawFragment + return ret +} + +// String returns a canonicalized string of name. The context parameters are +// sorted by the keys. +func (n *Name) String() string { + if n.Scheme == "" { + return n.ID + } + + // Sort and build query. + keys := make([]string, 0, len(n.ContextParams)) + for k := range n.ContextParams { + keys = append(keys, k) + } + sort.Strings(keys) + var pairs []string + for _, k := range keys { + pairs = append(pairs, strings.Join([]string{k, n.ContextParams[k]}, "=")) + } + rawQuery := strings.Join(pairs, "&") + + path := n.Type + if n.ID != "" { + path = "/" + path + "/" + n.ID + } + + tempURL := &url.URL{ + Scheme: n.Scheme, + Host: n.Authority, + Path: path, + RawQuery: rawQuery, + RawFragment: n.processingDirective, + } + return tempURL.String() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go new file mode 100644 index 000000000..55cfd6fbb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -0,0 +1,174 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package xdsresource implements the xDS data model layer. +// +// Provides resource-type specific functionality to unmarshal xDS protos into +// internal data structures that contain only fields gRPC is interested in. +// These internal data structures are passed to components in the xDS stack +// (resolver/balancers/server) that have expressed interest in receiving +// updates to specific resources. +package xdsresource + +import ( + "google.golang.org/grpc/internal/xds/bootstrap" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + xdsinternal.ResourceTypeMapForTesting = make(map[string]any) + xdsinternal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType + xdsinternal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType + xdsinternal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType + xdsinternal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType +} + +// Producer contains a single method to discover resource configuration from a +// remote management server using xDS APIs. 
+// +// The xdsclient package provides a concrete implementation of this interface. +type Producer interface { + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) +} + +// OnDoneFunc is a function to be invoked by watcher implementations upon +// completing the processing of a callback from the xDS client. Failure to +// invoke this callback prevents the xDS client from reading further messages +// from the xDS server. +type OnDoneFunc func() + +// ResourceWatcher wraps the callbacks to be invoked for different events +// corresponding to the resource being watched. +type ResourceWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + // The ResourceData parameter needs to be type asserted to the appropriate + // type for the resource being watched. + OnUpdate(ResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +// TODO: Once the implementation is complete, rename this interface as +// ResourceType and get rid of the existing ResourceType enum. + +// Type wraps all resource-type specific functionality. Each supported resource +// type will provide an implementation of this interface. +type Type interface { + // TypeURL is the xDS type URL of this resource type for v3 transport. + TypeURL() string + + // TypeName identifies resources in a transport protocol agnostic way. This + // can be used for logging/debugging purposes, as well in cases where the + // resource type name is to be uniquely identified but the actual + // functionality provided by the resource type is not required. + // + // TODO: once Type is renamed to ResourceType, rename TypeName to + // ResourceTypeName. + TypeName() string + + // AllResourcesRequiredInSotW indicates whether this resource type requires + // that all resources be present in every SotW response from the server. If + // true, a response that does not include a previously seen resource will be + // interpreted as a deletion of that resource. + AllResourcesRequiredInSotW() bool + + // Decode deserializes and validates an xDS resource serialized inside the + // provided `Any` proto, as received from the xDS management server. + // + // If protobuf deserialization fails or resource validation fails, + // returns a non-nil error. Otherwise, returns a fully populated + // DecodeResult. + Decode(*DecodeOptions, *anypb.Any) (*DecodeResult, error) +} + +// ResourceData contains the configuration data sent by the xDS management +// server, associated with the resource being watched. Every resource type must +// provide an implementation of this interface to represent the configuration +// received from the xDS management server. 
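+//
+// A ResourceWatcher recovers the concrete type with a type assertion, in the
+// same way as the delegating watchers in this package; for example, a watch
+// registered for a Listener resource might assert
+// `lis, ok := data.(*ListenerResourceData)` before reading lis.Resource.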
+type ResourceData interface { + isResourceData() + + // Equal returns true if the passed in resource data is equal to that of the + // receiver. + Equal(ResourceData) bool + + // ToJSON returns a JSON string representation of the resource data. + ToJSON() string + + Raw() *anypb.Any +} + +// DecodeOptions wraps the options required by ResourceType implementation for +// decoding configuration received from the xDS management server. +type DecodeOptions struct { + // BootstrapConfig contains the complete bootstrap configuration passed to + // the xDS client. This contains useful data for resource validation. + BootstrapConfig *bootstrap.Config + // ServerConfig contains the server config (from the above bootstrap + // configuration) of the xDS server from which the current resource, for + // which Decode() is being invoked, was received. + ServerConfig *bootstrap.ServerConfig +} + +// DecodeResult is the result of a decode operation. +type DecodeResult struct { + // Name is the name of the resource being watched. + Name string + // Resource contains the configuration associated with the resource being + // watched. + Resource ResourceData +} + +// resourceTypeState wraps the static state associated with concrete resource +// type implementations, which can then embed this struct and get the methods +// implemented here for free. +type resourceTypeState struct { + typeURL string + typeName string + allResourcesRequiredInSotW bool +} + +func (r resourceTypeState) TypeURL() string { + return r.typeURL +} + +func (r resourceTypeState) TypeName() string { + return r.typeName +} + +func (r resourceTypeState) AllResourcesRequiredInSotW() bool { + return r.allResourcesRequiredInSotW +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go new file mode 100644 index 000000000..ed32abb83 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -0,0 +1,150 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // RouteConfigTypeName represents the transport agnostic name for the + // route config resource. + RouteConfigTypeName = "RouteConfigResource" +) + +var ( + // Compile time interface checks. + _ Type = routeConfigResourceType{} + + // Singleton instantiation of the resource type implementation. 
+ routeConfigType = routeConfigResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3RouteConfigURL, + typeName: "RouteConfigResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// routeConfigResourceType provides the resource-type specific functionality for +// a RouteConfiguration resource. +// +// Implements the Type interface. +type routeConfigResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (routeConfigResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalRouteConfigResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: RouteConfigUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: rc}}, nil + +} + +// RouteConfigResourceData wraps the configuration of a RouteConfiguration +// resource as received from the management server. +// +// Implements the ResourceData interface. +type RouteConfigResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource RouteConfigUpdate +} + +// Equal returns true if other is equal to r. +func (r *RouteConfigResourceData) Equal(other ResourceData) bool { + if r == nil && other == nil { + return true + } + if (r == nil) != (other == nil) { + return false + } + return proto.Equal(r.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (r *RouteConfigResourceData) ToJSON() string { + return pretty.ToJSON(r.Resource) +} + +// Raw returns the underlying raw protobuf form of the route configuration +// resource. +func (r *RouteConfigResourceData) Raw() *anypb.Any { + return r.Resource.Raw +} + +// RouteConfigWatcher wraps the callbacks to be invoked for different +// events corresponding to the route configuration resource being watched. +type RouteConfigWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*RouteConfigResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. 
+ OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingRouteConfigWatcher struct { + watcher RouteConfigWatcher +} + +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + rc := data.(*RouteConfigResourceData) + d.watcher.OnUpdate(rc, onDone) +} + +func (d *delegatingRouteConfigWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchRouteConfig uses xDS to discover the configuration associated with the +// provided route configuration resource name. +func WatchRouteConfig(p Producer, name string, w RouteConfigWatcher) (cancel func()) { + delegator := &delegatingRouteConfigWatcher{watcher: w} + return p.WatchResource(routeConfigType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go new file mode 100644 index 000000000..994204101 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +// UpdateValidatorFunc performs validations on update structs using +// context/logic available at the xdsClient layer. Since these validation are +// performed on internal update structs, they can be shared between different +// API clients. +type UpdateValidatorFunc func(any) error + +// UpdateMetadata contains the metadata for each update, including timestamp, +// raw message, and so on. +type UpdateMetadata struct { + // Status is the status of this resource, e.g. ACKed, NACKed, or + // Not_exist(removed). + Status ServiceStatus + // Version is the version of the xds response. Note that this is the version + // of the resource in use (previous ACKed). If a response is NACKed, the + // NACKed version is in ErrState. + Version string + // Timestamp is when the response is received. + Timestamp time.Time + // ErrState is set when the update is NACKed. + ErrState *UpdateErrorMetadata +} + +// IsListenerResource returns true if the provider URL corresponds to an xDS +// Listener resource. +func IsListenerResource(url string) bool { + return url == version.V3ListenerURL +} + +// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS +// HTTPConnManager resource. +func IsHTTPConnManagerResource(url string) bool { + return url == version.V3HTTPConnManagerURL +} + +// IsRouteConfigResource returns true if the provider URL corresponds to an xDS +// RouteConfig resource. 
+func IsRouteConfigResource(url string) bool { + return url == version.V3RouteConfigURL +} + +// IsClusterResource returns true if the provider URL corresponds to an xDS +// Cluster resource. +func IsClusterResource(url string) bool { + return url == version.V3ClusterURL +} + +// IsEndpointsResource returns true if the provider URL corresponds to an xDS +// Endpoints resource. +func IsEndpointsResource(url string) bool { + return url == version.V3EndpointsURL +} + +// UnwrapResource unwraps and returns the inner resource if it's in a resource +// wrapper. The original resource is returned if it's not wrapped. +func UnwrapResource(r *anypb.Any) (*anypb.Any, error) { + url := r.GetTypeUrl() + if url != version.V3ResourceWrapperURL { + // Not wrapped. + return r, nil + } + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return nil, err + } + return inner.Resource, nil +} + +// ServiceStatus is the status of the update. +type ServiceStatus int + +const ( + // ServiceStatusUnknown is the default state, before a watch is started for + // the resource. + ServiceStatusUnknown ServiceStatus = iota + // ServiceStatusRequested is when the watch is started, but before and + // response is received. + ServiceStatusRequested + // ServiceStatusNotExist is when the resource doesn't exist in + // state-of-the-world responses (e.g. LDS and CDS), which means the resource + // is removed by the management server. + ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. + // ServiceStatusACKed is when the resource is ACKed. + ServiceStatusACKed + // ServiceStatusNACKed is when the resource is NACKed. + ServiceStatusNACKed +) + +// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state +// when a response is NACKed. +type UpdateErrorMetadata struct { + // Version is the version of the NACKed response. + Version string + // Err contains why the response was NACKed. + Err error + // Timestamp is when the NACKed response was received. + Timestamp time.Time +} + +// UpdateWithMD contains the raw message of the update and the metadata, +// including version, raw message, timestamp. +// +// This is to be used for config dump and CSDS, not directly by users (like +// resolvers/balancers). +type UpdateWithMD struct { + MD UpdateMetadata + Raw *anypb.Any +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go new file mode 100644 index 000000000..ae4cb1e46 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/protobuf/types/known/anypb" +) + +// ClusterType is the type of cluster from a received CDS response. 
+type ClusterType int + +const ( + // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint + // discovery to the management server. + ClusterTypeEDS ClusterType = iota + // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially + // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. + ClusterTypeLogicalDNS + // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a + // prioritized list of clusters to use. It is used for failover between clusters + // with a different configuration. + ClusterTypeAggregate +) + +// ClusterUpdate contains information from a received CDS response, which is of +// interest to the registered CDS watcher. +type ClusterUpdate struct { + ClusterType ClusterType + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string + // LRSServerConfig contains configuration about the xDS server that sent + // this cluster resource. This is also the server where load reports are to + // be sent, for this cluster. + LRSServerConfig *bootstrap.ServerConfig + // SecurityCfg contains security configuration sent by the control plane. + SecurityCfg *SecurityConfig + // MaxRequests for circuit breaking, if any (otherwise nil). + MaxRequests *uint32 + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form + DNSHostName string + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string + + // LBPolicy represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. + LBPolicy json.RawMessage + + // OutlierDetection is the outlier detection configuration for this cluster. + // If nil, it means this cluster does not use the outlier detection feature. + OutlierDetection json.RawMessage + + // Raw is the resource from the xds response. + Raw *anypb.Any + // TelemetryLabels are the string valued metadata of filter_metadata type + // "com.google.csm.telemetry_labels" with keys "service_name" or + // "service_namespace". + TelemetryLabels map[string]string +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go new file mode 100644 index 000000000..1254d250c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +// OverloadDropConfig contains the config to drop overloads. 
+type OverloadDropConfig struct { + Category string + Numerator uint32 + Denominator uint32 +} + +// EndpointHealthStatus represents the health status of an endpoint. +type EndpointHealthStatus int32 + +const ( + // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. + EndpointHealthStatusUnknown EndpointHealthStatus = iota + // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. + EndpointHealthStatusHealthy + // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. + EndpointHealthStatusUnhealthy + // EndpointHealthStatusDraining represents HealthStatus DRAINING. + EndpointHealthStatusDraining + // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. + EndpointHealthStatusTimeout + // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. + EndpointHealthStatusDegraded +) + +// Endpoint contains information of an endpoint. +type Endpoint struct { + Address string + HealthStatus EndpointHealthStatus + Weight uint32 +} + +// Locality contains information of a locality. +type Locality struct { + Endpoints []Endpoint + ID internal.LocalityID + Priority uint32 + Weight uint32 +} + +// EndpointsUpdate contains an EDS update. +type EndpointsUpdate struct { + Drops []OverloadDropConfig + // Localities in the EDS response with `load_balancing_weight` field not set + // or explicitly set to 0 are ignored while parsing the resource, and + // therefore do not show up here. + Localities []Locality + + // Raw is the resource from the xds response. + Raw *anypb.Any +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go new file mode 100644 index 000000000..a71e38ea9 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// ListenerUpdate contains information received in an LDS response, which is of +// interest to the registered LDS watcher. +type ListenerUpdate struct { + // RouteConfigName is the route configuration name corresponding to the + // target which is being watched through LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned inside LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + + // MaxStreamDuration contains the HTTP connection manager's + // common_http_protocol_options.max_stream_duration field, or zero if + // unset. + MaxStreamDuration time.Duration + // HTTPFilters is a list of HTTP filters (name, config) from the LDS + // response. 
+ HTTPFilters []HTTPFilter + // InboundListenerCfg contains inbound listener configuration. + InboundListenerCfg *InboundListenerConfig + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection +// manager field. +type HTTPFilter struct { + // Name is an arbitrary name of the filter. Used for applying override + // settings in virtual host / route / weighted cluster configuration (not + // yet supported). + Name string + // Filter is the HTTP filter found in the registry for the config type. + Filter httpfilter.Filter + // Config contains the filter's configuration + Config httpfilter.FilterConfig +} + +// InboundListenerConfig contains information about the inbound listener, i.e +// the server-side listener. +type InboundListenerConfig struct { + // Address is the local address on which the inbound listener is expected to + // accept incoming connections. + Address string + // Port is the local port on which the inbound listener is expected to + // accept incoming connections. + Port string + // FilterChains is the list of filter chains associated with this listener. + FilterChains *FilterChainManager +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go new file mode 100644 index 000000000..45d84d670 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go @@ -0,0 +1,248 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// RouteConfigUpdate contains information received in an RDS response, which is +// of interest to the registered RDS watcher. +type RouteConfigUpdate struct { + VirtualHosts []*VirtualHost + // ClusterSpecifierPlugins are the LB Configurations for any + // ClusterSpecifierPlugins referenced by the Route Table. + ClusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// VirtualHost contains the routes for a list of Domains. +// +// Note that the domains in this slice can be a wildcard, not an exact string. +// The consumer of this struct needs to find the best match for its hostname. +type VirtualHost struct { + Domains []string + // Routes contains a list of routes, each containing matchers and + // corresponding action. + Routes []*Route + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the virtual host which may be present. An individual filter's override + // may be unused if the matching Route contains an override for that + // filter. 
+ HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. This is a 64-bit random int computed at initialization time. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + +// RouteActionType is the action of the route from a received RDS response. +type RouteActionType int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteActionType = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + +// Route is both a specification of how to match a request as well as an +// indication of the action to take upon match. +type Route struct { + Path *string + Prefix *string + Regex *regexp.Regexp + // Indicates if prefix/path matching should be case insensitive. The default + // is false (case sensitive). + CaseInsensitive bool + Headers []*HeaderMatcher + Fraction *uint32 + + HashPolicies []*HashPolicy + + // If the matchers above indicate a match, the below configuration is used. + // If MaxStreamDuration is nil, it indicates neither of the route action's + // max_stream_duration fields (grpc_timeout_header_max nor + // max_stream_duration) were set. In this case, the ListenerUpdate's + // MaxStreamDuration field should be used. If MaxStreamDuration is set to + // an explicit zero duration, the application's deadline should be used. + MaxStreamDuration *time.Duration + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the route which may be present. An individual filter's override may be + // unused if the matching WeightedCluster contains an override for that + // filter. 
+ HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + ActionType RouteActionType + + // Only one of the following fields (WeightedClusters or + // ClusterSpecifierPlugin) will be set for a route. + WeightedClusters map[string]WeightedCluster + // ClusterSpecifierPlugin is the name of the Cluster Specifier Plugin that + // this Route is linked to, if specified by xDS. + ClusterSpecifierPlugin string +} + +// WeightedCluster contains settings for an xds ActionType.WeightedCluster. +type WeightedCluster struct { + // Weight is the relative weight of the cluster. It will never be zero. + Weight uint32 + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the weighted cluster which may be present. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig +} + +// HeaderMatcher represents header matchers. +type HeaderMatcher struct { + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool + StringMatch *matcher.StringMatcher +} + +// Int64Range is a range for header range match. +type Int64Range struct { + Start int64 + End int64 +} + +// SecurityConfig contains the security configuration received as part of the +// Cluster resource on the client-side, and as part of the Listener resource on +// the server-side. +type SecurityConfig struct { + // RootInstanceName identifies the certProvider plugin to be used to fetch + // root certificates. This instance name will be resolved to the plugin name + // and its associated configuration from the certificate_providers field of + // the bootstrap file. + RootInstanceName string + // RootCertName is the certificate name to be passed to the plugin (looked + // up from the bootstrap file) while fetching root certificates. + RootCertName string + // IdentityInstanceName identifies the certProvider plugin to be used to + // fetch identity certificates. This instance name will be resolved to the + // plugin name and its associated configuration from the + // certificate_providers field of the bootstrap file. + IdentityInstanceName string + // IdentityCertName is the certificate name to be passed to the plugin + // (looked up from the bootstrap file) while fetching identity certificates. + IdentityCertName string + // SubjectAltNameMatchers is an optional list of match criteria for SANs + // specified on the peer certificate. Used only on the client-side. + // + // Some intricacies: + // - If this field is empty, then any peer certificate is accepted. + // - If the peer certificate contains a wildcard DNS SAN, and an `exact` + // matcher is configured, a wildcard DNS match is performed instead of a + // regular string comparison. + SubjectAltNameMatchers []matcher.StringMatcher + // RequireClientCert indicates if the server handshake process expects the + // client to present a certificate. Set to true when performing mTLS. Used + // only on the server-side. + RequireClientCert bool +} + +// Equal returns true if sc is equal to other. 
+func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go new file mode 100644 index 000000000..ab024b57c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -0,0 +1,699 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +// ValidateClusterAndConstructClusterUpdateForTesting exports the +// validateClusterAndConstructClusterUpdate function for testing purposes. +var ValidateClusterAndConstructClusterUpdateForTesting = validateClusterAndConstructClusterUpdate + +// TransportSocket proto message has a `name` field which is expected to be set +// to this value by the management server. 
+const transportSocketName = "envoy.transport_sockets.tls" + +func unmarshalClusterResource(r *anypb.Any, serverCfg *bootstrap.ServerConfig) (string, ClusterUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsClusterResource(r.GetTypeUrl()) { + return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cluster := &v3clusterpb.Cluster{} + if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + cu, err := validateClusterAndConstructClusterUpdate(cluster, serverCfg) + if err != nil { + return cluster.GetName(), ClusterUpdate{}, err + } + cu.Raw = r + + return cluster.GetName(), cu, nil +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M + + defaultLeastRequestChoiceCount = 2 +) + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster, serverCfg *bootstrap.ServerConfig) (ClusterUpdate, error) { + telemetryLabels := make(map[string]string) + if fmd := cluster.GetMetadata().GetFilterMetadata(); fmd != nil { + if val, ok := fmd["com.google.csm.telemetry_labels"]; ok { + if fields := val.GetFields(); fields != nil { + if val, ok := fields["service_name"]; ok { + if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { + telemetryLabels["csm.service_name"] = val.GetStringValue() + } + } + if val, ok := fields["service_namespace"]; ok { + if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { + telemetryLabels["csm.service_namespace_name"] = val.GetStringValue() + } + } + } + } + } + // "The values for the service labels csm.service_name and + // csm.service_namespace_name come from xDS, “unknown” if not present." - + // CSM Design. + if _, ok := telemetryLabels["csm.service_name"]; !ok { + telemetryLabels["csm.service_name"] = "unknown" + } + if _, ok := telemetryLabels["csm.service_namespace_name"]; !ok { + telemetryLabels["csm.service_namespace_name"] = "unknown" + } + + var lbPolicy json.RawMessage + var err error + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: + lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`) + case v3clusterpb.Cluster_RING_HASH: + rhc := cluster.GetRingHashLbConfig() + if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { + return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) + } + // Minimum defaults to 1024 entries, and limited to 8M entries Maximum + // defaults to 8M entries, and limited to 8M entries + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhc.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhc.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) + case v3clusterpb.Cluster_LEAST_REQUEST: + if !envconfig.LeastRequestLB { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + + // "The configuration for the Least Request LB policy is the + // least_request_lb_config field. 
The field is optional; if not present, + // defaults will be assumed for all of its values." - A48 + lr := cluster.GetLeastRequestLbConfig() + var choiceCount uint32 = defaultLeastRequestChoiceCount + if cc := lr.GetChoiceCount(); cc != nil { + choiceCount = cc.GetValue() + } + // "If choice_count < 2, the config will be rejected." - A48 + if choiceCount < 2 { + return ClusterUpdate{}, fmt.Errorf("Cluster_LeastRequestLbConfig.ChoiceCount must be >= 2, got: %v", choiceCount) + } + + lrLBCfg := []byte(fmt.Sprintf("{\"choiceCount\": %d}", choiceCount)) + lbPolicy = []byte(fmt.Sprintf(`[{"least_request_experimental": %s}]`, lrLBCfg)) + default: + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + // Process security configuration received from the control plane iff the + // corresponding environment variable is set. + var sc *SecurityConfig + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + + // Process outlier detection received from the control plane iff the + // corresponding environment variable is set. + var od json.RawMessage + if od, err = outlierConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + + if cluster.GetLoadBalancingPolicy() != nil { + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0) + if err != nil { + return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) + } + // "It will be the responsibility of the XdsClient to validate the + // converted configuration. It will do this by having the gRPC LB policy + // registry parse the configuration." - A52 + bc := &iserviceconfig.BalancerConfig{} + if err := json.Unmarshal(lbPolicy, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) + } + } + + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, + OutlierDetection: od, + TelemetryLabels: telemetryLabels, + } + + if lrs := cluster.GetLrsServer(); lrs != nil { + if lrs.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("unsupported config_source_specifier %T in lrs_server field", lrs.ConfigSourceSpecifier) + } + ret.LRSServerConfig = serverCfg + } + + // Validate and set cluster type from the response. 
+ switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if configsource := cluster.GetEdsClusterConfig().GetEdsConfig(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS config source is not ADS or Self: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + if strings.HasPrefix(ret.ClusterName, "xdstp:") && ret.EDSServiceName == "" { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS service name is not set with a new-style cluster name: %+v", cluster) + } + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if len(clusters.Clusters) == 0 { + return ClusterUpdate{}, fmt.Errorf("xds: aggregate cluster has empty clusters field in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } +} + +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. +func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") + } + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) + } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) + } + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil +} + +// securityConfigFromCluster extracts the relevant security configuration from +// the received Cluster resource. 
+func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { + if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) + } + // The Cluster resource contains a `transport_socket` field, which contains + // a oneof `typed_config` field of type `protobuf.Any`. The any proto + // contains a marshaled representation of an `UpstreamTlsContext` message. + ts := cluster.GetTransportSocket() + if ts == nil { + return nil, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3UpstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) + } + upstreamCtx := &v3tlspb.UpstreamTlsContext{} + if err := proto.Unmarshal(tc.GetValue(), upstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) + } + // The following fields from `UpstreamTlsContext` are ignored: + // - sni + // - allow_renegotiation + // - max_session_keys + if upstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") + } + + return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) +} + +// common is expected to be not nil. +// The `alpn_protocols` field is ignored. +func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) + } + + // For now, if we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here. + sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) + if sc == nil || sc.Equal(&SecurityConfig{}) { + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) + if err != nil { + return nil, err + } + } + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } + } + return sc, nil +} + +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `CommonTlsContext` contains a + // `tls_certificate_certificate_provider_instance` field of type + // `CertificateProviderInstance`, which contains the provider instance name + // and the certificate name to fetch identity certs. 
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a `validation_context_type` field which + // is a oneof. We can get the values that we are interested in from two of + // those possible values: + // - combined validation context: + // - contains a default validation context which holds the list of + // matchers for accepted SANs. + // - contains certificate provider instance configuration + // - certificate provider instance configuration + // - in this case, we do not get a list of accepted SANs. + switch t := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + combined := common.GetCombinedValidationContext() + var matchers []matcher.StringMatcher + if def := combined.GetDefaultValidationContext(); def != nil { + for _, m := range def.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + } + case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: + pi := common.GetValidationContextCertificateProviderInstance() + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + case nil: + // It is valid for the validation context to be nil on the server side. + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", t) + } + return sc, nil +} + +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. 
+ sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type `CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+ if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + +// circuitBreakersFromCluster extracts the circuit breakers configuration from +// the received cluster resource. Returns nil if no CircuitBreakers or no +// Thresholds in CircuitBreakers. +func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { + for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { + if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { + continue + } + maxRequestsPb := threshold.GetMaxRequests() + if maxRequestsPb == nil { + return nil + } + maxRequests := maxRequestsPb.GetValue() + return &maxRequests + } + return nil +} + +// idurationp takes a time.Duration and converts it to an internal duration, and +// returns a pointer to that internal duration. +func idurationp(d time.Duration) *iserviceconfig.Duration { + id := iserviceconfig.Duration(d) + return &id +} + +func uint32p(i uint32) *uint32 { + return &i +} + +// Helper types to prepare Outlier Detection JSON. Pointer types to distinguish +// between unset and a zero value. 
+type successRateEjection struct { + StdevFactor *uint32 `json:"stdevFactor,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type failurePercentageEjection struct { + Threshold *uint32 `json:"threshold,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type odLBConfig struct { + Interval *iserviceconfig.Duration `json:"interval,omitempty"` + BaseEjectionTime *iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + MaxEjectionTime *iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionPercent *uint32 `json:"maxEjectionPercent,omitempty"` + SuccessRateEjection *successRateEjection `json:"successRateEjection,omitempty"` + FailurePercentageEjection *failurePercentageEjection `json:"failurePercentageEjection,omitempty"` +} + +// outlierConfigFromCluster converts the received Outlier Detection +// configuration into JSON configuration for Outlier Detection, taking into +// account xDS Defaults. Returns nil if no OutlierDetection field set in the +// cluster resource. +func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (json.RawMessage, error) { + od := cluster.GetOutlierDetection() + if od == nil { + return nil, nil + } + + // "The outlier_detection field of the Cluster resource should have its fields + // validated according to the rules for the corresponding LB policy config + // fields in the above "Validation" section. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + var interval *iserviceconfig.Duration + if i := od.GetInterval(); i != nil { + if err := i.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) + } + if interval = idurationp(i.AsDuration()); *interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", *interval) + } + } + + var baseEjectionTime *iserviceconfig.Duration + if bet := od.GetBaseEjectionTime(); bet != nil { + if err := bet.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) + } + if baseEjectionTime = idurationp(bet.AsDuration()); *baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", *baseEjectionTime) + } + } + + var maxEjectionTime *iserviceconfig.Duration + if met := od.GetMaxEjectionTime(); met != nil { + if err := met.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) + } + if maxEjectionTime = idurationp(met.AsDuration()); *maxEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", *maxEjectionTime) + } + } + + // "The fields max_ejection_percent, enforcing_success_rate, + // failure_percentage_threshold, and enforcing_failure_percentage must have + // values less than or equal to 100. If any of these requirements is + // violated, the Cluster resource should be NACKed." 
- A50 + var maxEjectionPercent *uint32 + if mep := od.GetMaxEjectionPercent(); mep != nil { + if maxEjectionPercent = uint32p(mep.GetValue()); *maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", *maxEjectionPercent) + } + } + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var enforcingSuccessRate *uint32 + if esr := od.GetEnforcingSuccessRate(); esr != nil { + if enforcingSuccessRate = uint32p(esr.GetValue()); *enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", *enforcingSuccessRate) + } + } + var failurePercentageThreshold *uint32 + if fpt := od.GetFailurePercentageThreshold(); fpt != nil { + if failurePercentageThreshold = uint32p(fpt.GetValue()); *failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", *failurePercentageThreshold) + } + } + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var enforcingFailurePercentage *uint32 + if efp := od.GetEnforcingFailurePercentage(); efp != nil { + if enforcingFailurePercentage = uint32p(efp.GetValue()); *enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", *enforcingFailurePercentage) + } + } + + var successRateStdevFactor *uint32 + if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { + successRateStdevFactor = uint32p(srsf.GetValue()) + } + var successRateMinimumHosts *uint32 + if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { + successRateMinimumHosts = uint32p(srmh.GetValue()) + } + var successRateRequestVolume *uint32 + if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { + successRateRequestVolume = uint32p(srrv.GetValue()) + } + var failurePercentageMinimumHosts *uint32 + if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { + failurePercentageMinimumHosts = uint32p(fpmh.GetValue()) + } + var failurePercentageRequestVolume *uint32 + if fprv := od.GetFailurePercentageRequestVolume(); fprv != nil { + failurePercentageRequestVolume = uint32p(fprv.GetValue()) + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *successRateEjection + if enforcingSuccessRate == nil || *enforcingSuccessRate != 0 { + sre = &successRateEjection{ + StdevFactor: successRateStdevFactor, + EnforcementPercentage: enforcingSuccessRate, + MinimumHosts: successRateMinimumHosts, + RequestVolume: successRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *failurePercentageEjection + if enforcingFailurePercentage != nil && *enforcingFailurePercentage != 0 { + fpe = &failurePercentageEjection{ + Threshold: failurePercentageThreshold, + EnforcementPercentage: enforcingFailurePercentage, + MinimumHosts: failurePercentageMinimumHosts, + RequestVolume: failurePercentageRequestVolume, + } + } + + odLBCfg := &odLBConfig{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } + return json.Marshal(odLBCfg) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go new file mode 100644 index 000000000..f65845b70 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "math" + "net" + "strconv" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsEndpointsResource(r.GetTypeUrl()) { + return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cla := &v3endpointpb.ClusterLoadAssignment{} + if err := proto.Unmarshal(r.GetValue(), cla); err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := parseEDSRespProto(cla) + if err != nil { + return cla.GetClusterName(), EndpointsUpdate{}, err + } + u.Raw = r + return cla.GetClusterName(), u, nil +} + +func parseAddress(socketAddress *v3corepb.SocketAddress) string { + return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) +} + +func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { + percentage := dropPolicy.GetDropPercentage() + var ( + numerator = percentage.GetNumerator() + denominator uint32 + ) + switch percentage.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + denominator = 100 + case v3typepb.FractionalPercent_TEN_THOUSAND: + denominator = 10000 + case v3typepb.FractionalPercent_MILLION: + denominator = 1000000 + } + return OverloadDropConfig{ + Category: dropPolicy.GetCategory(), + Numerator: numerator, + Denominator: denominator, + } +} + +func 
parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs map[string]bool) ([]Endpoint, error) { + endpoints := make([]Endpoint, 0, len(lbEndpoints)) + for _, lbEndpoint := range lbEndpoints { + // If the load_balancing_weight field is specified, it must be set to a + // value of at least 1. If unspecified, each host is presumed to have + // equal weight in a locality. + weight := uint32(1) + if w := lbEndpoint.GetLoadBalancingWeight(); w != nil { + if w.GetValue() == 0 { + return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + } + weight = w.GetValue() + } + addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) + if uniqueEndpointAddrs[addr] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + } + uniqueEndpointAddrs[addr] = true + endpoints = append(endpoints, Endpoint{ + HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: addr, + Weight: weight, + }) + } + return endpoints, nil +} + +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { + ret := EndpointsUpdate{} + for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { + ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) + } + priorities := make(map[uint32]map[string]bool) + sumOfWeights := make(map[uint32]uint64) + uniqueEndpointAddrs := make(map[string]bool) + for _, locality := range m.Endpoints { + l := locality.GetLocality() + if l == nil { + return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + } + weight := locality.GetLoadBalancingWeight().GetValue() + if weight == 0 { + logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l)) + continue + } + priority := locality.GetPriority() + sumOfWeights[priority] += uint64(weight) + if sumOfWeights[priority] > math.MaxUint32 { + return EndpointsUpdate{}, fmt.Errorf("sum of weights of localities at the same priority %d exceeded maximal value", priority) + } + localitiesWithPriority := priorities[priority] + if localitiesWithPriority == nil { + localitiesWithPriority = make(map[string]bool) + priorities[priority] = localitiesWithPriority + } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } + lidStr, _ := lid.ToString() + + // "Since an xDS configuration can place a given locality under multiple + // priorities, it is possible to see locality weight attributes with + // different values for the same locality." - A52 + // + // This is handled in the client by emitting the locality weight + // specified for the priority it is specified in. If the same locality + // has a different weight in two priorities, each priority will specify + // a locality with the locality weight specified for that priority, and + // thus the subsequent tree of balancers linked to that priority will + // use that locality weight as well. 
+ if localitiesWithPriority[lidStr] { + return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) + } + localitiesWithPriority[lidStr] = true + endpoints, err := parseEndpoints(locality.GetLbEndpoints(), uniqueEndpointAddrs) + if err != nil { + return EndpointsUpdate{}, err + } + ret.Localities = append(ret.Localities, Locality{ + ID: lid, + Endpoints: endpoints, + Weight: weight, + Priority: priority, + }) + } + for i := 0; i < len(priorities); i++ { + if _, ok := priorities[uint32(i)]; !ok { + return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + } + } + return ret, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go new file mode 100644 index 000000000..e9a29b938 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -0,0 +1,277 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "strconv" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsListenerResource(r.GetTypeUrl()) { + return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + lu, err := processListener(lis) + if err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + lu.Raw = r + return lis.GetName(), *lu, nil +} + +func processListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if lis.GetApiListener() != nil { + return processClientSideListener(lis) + } + return processServerSideListener(lis) +} + +// processClientSideListener checks if the provided Listener proto meets +// the expected criteria. If so, it returns a non-empty routeConfigName. 
+func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + update := &ListenerUpdate{} + + apiLisAny := lis.GetApiListener().GetApiListener() + if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { + return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) + } + apiLis := &v3httppb.HttpConnectionManager{} + if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { + return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err) + } + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if apiLis.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) + } + if len(apiLis.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) + } + + switch apiLis.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if configsource := apiLis.GetRds().GetConfigSource(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return nil, fmt.Errorf("LDS's RDS configSource is not ADS or Self: %+v", lis) + } + name := apiLis.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", lis) + } + update.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig()) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + update.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) + } + + // The following checks and fields only apply to xDS protocol versions v3+. + + update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() + + var err error + if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { + return nil, err + } + + return update, nil +} + +func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { + switch { + case config.MessageIs(&v3xdsxdstypepb.TypedStruct{}): + // The real type name is inside the new TypedStruct message. + s := new(v3xdsxdstypepb.TypedStruct) + if err := config.UnmarshalTo(s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + case config.MessageIs(&v1xdsudpatypepb.TypedStruct{}): + // The real type name is inside the old TypedStruct message. 
+ s := new(v1xdsudpatypepb.TypedStruct) + if err := config.UnmarshalTo(s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: + return config, config.GetTypeUrl(), nil + } +} + +func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { + config, typeURL, err := unwrapHTTPFilterConfig(cfg) + if err != nil { + return nil, nil, err + } + filterBuilder := httpfilter.Get(typeURL) + if filterBuilder == nil { + if optional { + return nil, nil, nil + } + return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) + } + parseFunc := filterBuilder.ParseFilterConfig + if !lds { + parseFunc = filterBuilder.ParseFilterConfigOverride + } + filterConfig, err := parseFunc(config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) + } + return filterBuilder, filterConfig, nil +} + +func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { + if len(cfgs) == 0 { + return nil, nil + } + m := make(map[string]httpfilter.FilterConfig) + for name, cfg := range cfgs { + optional := false + s := new(v3routepb.FilterConfig) + if cfg.MessageIs(s) { + if err := cfg.UnmarshalTo(s); err != nil { + return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) + } + cfg = s.GetConfig() + optional = s.GetIsOptional() + } + + httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) + if err != nil { + return nil, fmt.Errorf("filter override %q: %v", name, err) + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + m[name] = config + } + return m, nil +} + +func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { + ret := make([]HTTPFilter, 0, len(filters)) + seenNames := make(map[string]bool, len(filters)) + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, errors.New("filter missing name field") + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate filter name %q", name) + } + seenNames[name] = true + + httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) + if err != nil { + return nil, err + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + if server { + if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) + } + } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) + } + + // Save name/config + ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) + } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } + return ret, nil +} + +func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if n := len(lis.ListenerFilters); n != 0 { + return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") + } + addr := lis.GetAddress() + if addr == nil { + return nil, fmt.Errorf("no address field in LDS response: %+v", lis) + } + sockAddr := addr.GetSocketAddress() + if sockAddr == nil { + return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) + } + lu := &ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: sockAddr.GetAddress(), + Port: strconv.Itoa(int(sockAddr.GetPortValue())), + }, + } + + fcMgr, err := NewFilterChainManager(lis) + if err != nil { + return nil, err + } + lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go new file mode 100644 index 000000000..db862514e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -0,0 +1,436 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math" + "regexp" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsRouteConfigResource(r.GetTypeUrl()) { + return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + rc := &v3routepb.RouteConfiguration{} + if err := proto.Unmarshal(r.GetValue(), rc); err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := generateRDSUpdateFromRouteConfiguration(rc) + if err != nil { + return rc.GetName(), RouteConfigUpdate{}, err + } + u.Raw = r + return rc.GetName(), u, nil +} + +// generateRDSUpdateFromRouteConfiguration checks if the provided +// RouteConfiguration meets the expected criteria. If so, it returns a +// RouteConfigUpdate with nil error. +// +// A RouteConfiguration resource is considered valid when only if it contains a +// VirtualHost whose domain field matches the server name from the URI passed +// to the gRPC channel, and it contains a clusterName or a weighted cluster. +// +// The RouteConfiguration includes a list of virtualHosts, which may have zero +// or more elements. We are interested in the element whose domains field +// matches the server name specified in the "xds:" URI. The only field in the +// VirtualHost proto that the we are interested in is the list of routes. We +// only look at the last route in the list (the default route), whose match +// field must be empty and whose route field must be set. Inside that route +// message, the cluster field will contain the clusterName or weighted clusters +// we are looking for. +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) { + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + csps, err := processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + // cspNames represents all the cluster specifiers referenced by Route + // Actions - any cluster specifiers not referenced by a Route Action can be + // ignored and not emitted by the xdsclient. 
+ var cspNames = make(map[string]bool) + for _, vh := range rc.GetVirtualHosts() { + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + for n := range cspNs { + cspNames[n] = true + } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + vhOut := &VirtualHost{ + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, + } + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) + } + vhOut.HTTPFilterConfigOverride = cfgs + vhs = append(vhs, vhOut) + } + + // "For any entry in the RouteConfiguration.cluster_specifier_plugins not + // referenced by an enclosed ActionType's cluster_specifier_plugin, the xDS + // client should not provide it to its consumers." - RLS in xDS Design + for name := range csps { + if !cspNames[name] { + delete(csps, name) + } + } + + return RouteConfigUpdate{VirtualHosts: vhs, ClusterSpecifierPlugins: csps}, nil +} + +func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (map[string]clusterspecifier.BalancerConfig, error) { + cspCfgs := make(map[string]clusterspecifier.BalancerConfig) + // "The xDS client will inspect all elements of the + // cluster_specifier_plugins field looking up a plugin based on the + // extension.typed_config of each." - RLS in xDS design + for _, csp := range csps { + cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl()) + if cs == nil { + if csp.GetIsOptional() { + // "If a plugin is not supported but has is_optional set, then + // we will ignore any routes that point to that plugin" + cspCfgs[csp.GetExtension().GetName()] = nil + continue + } + // "If no plugin is registered for it, the resource will be NACKed." + // - RLS in xDS design + return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + lbCfg, err := cs.ParseClusterSpecifierConfig(csp.GetExtension().GetTypedConfig()) + if err != nil { + // "If a plugin is found, the value of the typed_config field will + // be passed to it's conversion method, and if an error is + // encountered, the resource will be NACKED." - RLS in xDS design + return nil, fmt.Errorf("error: %q parsing config %q for cluster specifier %q of type %q", err, csp.GetExtension().GetTypedConfig(), csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + // "If all cluster specifiers are valid, the xDS client will store the + // configurations in a map keyed by the name of the extension instance." 
- + // RLS in xDS Design + cspCfgs[csp.GetExtension().GetName()] = lbCfg + } + return cspCfgs, nil +} + +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig) ([]*Route, map[string]bool, error) { + var routesRet []*Route + var cspNames = make(map[string]bool) + for _, r := range routes { + match := r.GetMatch() + if match == nil { + return nil, nil, fmt.Errorf("route %+v doesn't have a match", r) + } + + if len(match.GetQueryParameters()) != 0 { + // Ignore route with query parameters. 
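generateRetryConfig above fills in the xDS retry defaults: num_retries defaults to 1, base_interval to 25ms, and max_interval to ten times the base. The sketch below shows one conventional way a caller could derive the delay before the n-th retry from those two values (exponential growth capped at the max, with full jitter); the exact jitter formula is an illustrative assumption, not the vendored retry code.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nthRetryDelay returns a delay for retry attempt n (1-based): the backoff
// doubles from base up to maxBackoff, and a random fraction of it is used
// (full jitter). This mirrors the usual exponential-backoff scheme; it is a
// sketch, not the vendored implementation.
func nthRetryDelay(base, maxBackoff time.Duration, n int) time.Duration {
	backoff := base
	for i := 1; i < n && backoff < maxBackoff; i++ {
		backoff *= 2
	}
	if backoff > maxBackoff {
		backoff = maxBackoff
	}
	return time.Duration(rand.Int63n(int64(backoff) + 1))
}

func main() {
	// Defaults from generateRetryConfig: base 25ms, max 10*base = 250ms.
	base, maxBackoff := 25*time.Millisecond, 250*time.Millisecond
	for n := 1; n <= 5; n++ {
		fmt.Printf("attempt %d: delay %v\n", n, nthRetryDelay(base, maxBackoff, n))
	}
}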
+ logger.Warningf("Ignoring route %+v with query parameter matchers", r) + continue + } + + pathSp := match.GetPathSpecifier() + if pathSp == nil { + return nil, nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + } + + var route Route + switch pt := pathSp.(type) { + case *v3routepb.RouteMatch_Prefix: + route.Prefix = &pt.Prefix + case *v3routepb.RouteMatch_Path: + route.Path = &pt.Path + case *v3routepb.RouteMatch_SafeRegex: + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + } + + if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { + route.CaseInsensitive = !caseSensitive.Value + } + + for _, h := range match.GetHeaders() { + var header HeaderMatcher + switch ht := h.GetHeaderMatchSpecifier().(type) { + case *v3routepb.HeaderMatcher_ExactMatch: + header.ExactMatch = &ht.ExactMatch + case *v3routepb.HeaderMatcher_SafeRegexMatch: + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re + case *v3routepb.HeaderMatcher_RangeMatch: + header.RangeMatch = &Int64Range{ + Start: ht.RangeMatch.Start, + End: ht.RangeMatch.End, + } + case *v3routepb.HeaderMatcher_PresentMatch: + header.PresentMatch = &ht.PresentMatch + case *v3routepb.HeaderMatcher_PrefixMatch: + header.PrefixMatch = &ht.PrefixMatch + case *v3routepb.HeaderMatcher_SuffixMatch: + header.SuffixMatch = &ht.SuffixMatch + case *v3routepb.HeaderMatcher_StringMatch: + sm, err := matcher.StringMatcherFromProto(ht.StringMatch) + if err != nil { + return nil, nil, fmt.Errorf("route %+v has an invalid string matcher: %v", err, ht.StringMatch) + } + header.StringMatch = &sm + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + } + header.Name = h.GetName() + invert := h.GetInvertMatch() + header.InvertMatch = &invert + route.Headers = append(route.Headers, &header) + } + + if fr := match.GetRuntimeFraction(); fr != nil { + d := fr.GetDefaultValue() + n := d.GetNumerator() + switch d.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + n *= 10000 + case v3typepb.FractionalPercent_TEN_THOUSAND: + n *= 100 + case v3typepb.FractionalPercent_MILLION: + } + route.Fraction = &n + } + + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. 
+ hp, err := hashPoliciesProtoToSlice(action.HashPolicy) + if err != nil { + return nil, nil, err + } + route.HashPolicies = hp + + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint64 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + totalWeight += uint64(w) + if totalWeight > math.MaxUint32 { + return nil, nil, fmt.Errorf("xds: total weight of clusters exceeds MaxUint32") + } + wc := WeightedCluster{Weight: w} + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs + route.WeightedClusters[c.GetName()] = wc + } + if totalWeight == 0 { + return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + } + case *v3routepb.RouteAction_ClusterSpecifierPlugin: + // gRFC A28 was updated to say the following: + // + // The route’s action field must be route, and its + // cluster_specifier: + // - Can be Cluster + // - Can be Weighted_clusters + // - Can be unset or an unsupported field. The route containing + // this action will be ignored. + // + // This means that if this env var is not set, we should treat + // it as if it we didn't know about the cluster_specifier_plugin + // at all. + if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { + // "When processing RouteActions, if any action includes a + // cluster_specifier_plugin value that is not in + // RouteConfiguration.cluster_specifier_plugins, the + // resource will be NACKed." - RLS in xDS design + return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) + } + if csps[a.ClusterSpecifierPlugin] == nil { + logger.Warningf("Ignoring route %+v with optional and unsupported cluster specifier plugin %+v", r, a.ClusterSpecifierPlugin) + continue + } + cspNames[a.ClusterSpecifierPlugin] = true + route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin + default: + logger.Warningf("Ignoring route %+v with unknown ClusterSpecifier %+v", r, a) + continue + } + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. + dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() + } + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d + } + + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + + route.ActionType = RouteActionRoute + + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side. 
+ route.ActionType = RouteActionNonForwardingAction + default: + route.ActionType = RouteActionUnsupported + } + + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v: %v", r, err) + } + route.HTTPFilterConfigOverride = cfgs + routesRet = append(routesRet, &route) + } + return routesRet, cspNames, nil +} + +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Warningf("Ignoring hash policy %+v with invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Warningf("Ignoring unsupported hash policy %T", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go new file mode 100644 index 000000000..82ad5fe52 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package version defines constants to distinguish between supported xDS API +// versions. +package version + +// Resource URLs. We need to be able to accept either version of the resource +// regardless of the version of the transport protocol in use. 
+const ( + googleapiPrefix = "type.googleapis.com/" + + V3ListenerType = "envoy.config.listener.v3.Listener" + V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" + V3ClusterType = "envoy.config.cluster.v3.Cluster" + V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment" + + V3ResourceWrapperURL = googleapiPrefix + "envoy.service.discovery.v3.Resource" + V3ListenerURL = googleapiPrefix + V3ListenerType + V3RouteConfigURL = googleapiPrefix + V3RouteConfigType + V3ClusterURL = googleapiPrefix + V3ClusterType + V3EndpointsURL = googleapiPrefix + V3EndpointsType + V3HTTPConnManagerURL = googleapiPrefix + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + V3UpstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + V3DownstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" +) diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go new file mode 100644 index 000000000..1fea8c830 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -0,0 +1,321 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "context" + "errors" + "fmt" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const serverPrefix = "[xds-server %p] " + +var ( + // These new functions will be overridden in unit tests. + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.New(name) + } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { + return grpc.NewServer(opts...) + } +) + +// grpcServer contains methods from grpc.Server which are used by the +// GRPCServer type here. This is useful for overriding in unit tests. +type grpcServer interface { + RegisterService(*grpc.ServiceDesc, any) + Serve(net.Listener) error + Stop() + GracefulStop() + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// GRPCServer wraps a gRPC server and provides server-side xDS functionality, by +// communication with a management server using xDS APIs. It implements the +// grpc.ServiceRegistrar interface and can be passed to service registration +// functions in IDL generated code. 
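The GRPCServer type documented just above is the xDS-aware replacement for grpc.Server on the server side. A minimal usage sketch follows; it assumes a valid xDS bootstrap file (pointed to by GRPC_XDS_BOOTSTRAP) whose server_listener_resource_name_template is set, and the commented-out service registration stands in for hypothetical generated code. The mode callback uses the ServingModeCallback option defined in server_options.go further below.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/xds"
)

func main() {
	// ServingModeCallback is optional; without it the server just logs mode
	// changes via the default loggingServerModeChangeCallback shown below.
	srv, err := xds.NewGRPCServer(
		grpc.Creds(insecure.NewCredentials()),
		xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) {
			log.Printf("listener %s switched to %v (err: %v)", addr, args.Mode, args.Err)
		}),
	)
	if err != nil {
		log.Fatalf("xds.NewGRPCServer() failed: %v", err)
	}
	defer srv.Stop()

	// pb.RegisterGreeterServer(srv, &greeterImpl{}) // hypothetical generated code

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("net.Listen() failed: %v", err)
	}
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("Serve() returned: %v", err)
	}
}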
+type GRPCServer struct { + gs grpcServer + quit *grpcsync.Event + logger *internalgrpclog.PrefixLogger + opts *serverOptions + xdsC xdsclient.XDSClient + xdsClientClose func() +} + +// NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. +// The underlying gRPC server has no service registered and has not started to +// accept requests yet. +func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { + newOpts := []grpc.ServerOption{ + grpc.ChainUnaryInterceptor(xdsUnaryInterceptor), + grpc.ChainStreamInterceptor(xdsStreamInterceptor), + } + newOpts = append(newOpts, opts...) + s := &GRPCServer{ + gs: newGRPCServer(newOpts...), + quit: grpcsync.NewEvent(), + } + s.handleServerOptions(opts) + + // Initializing the xDS client upfront (instead of at serving time) + // simplifies the code by eliminating the need for a mutex to protect the + // xdsC and xdsClientClose fields. + newXDSClient := newXDSClient + if s.opts.bootstrapContentsForTesting != nil { + // Bootstrap file contents may be specified as a server option for tests. + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: name, + Contents: s.opts.bootstrapContentsForTesting, + }) + } + } + xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer) + if err != nil { + return nil, fmt.Errorf("xDS client creation failed: %v", err) + } + + // Validate the bootstrap configuration for server specific fields. + + // Listener resource name template is mandatory on the server side. + cfg := xdsClient.BootstrapConfig() + if cfg.ServerListenerResourceNameTemplate() == "" { + xdsClientClose() + return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") + } + + s.xdsC = xdsClient + s.xdsClientClose = xdsClientClose + + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) + s.logger.Infof("Created xds.GRPCServer") + + return s, nil +} + +// handleServerOptions iterates through the list of server options passed in by +// the user, and handles the xDS server specific options. +func (s *GRPCServer) handleServerOptions(opts []grpc.ServerOption) { + so := s.defaultServerOptions() + for _, opt := range opts { + if o, ok := opt.(*serverOption); ok { + o.apply(so) + } + } + s.opts = so +} + +func (s *GRPCServer) defaultServerOptions() *serverOptions { + return &serverOptions{ + // A default serving mode change callback which simply logs at the + // default-visible log level. This will be used if the application does not + // register a mode change callback. + // + // Note that this means that `s.opts.modeCallback` will never be nil and can + // safely be invoked directly from `handleServingModeChanges`. + modeCallback: s.loggingServerModeChangeCallback, + } +} + +func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args ServingModeChangeArgs) { + switch args.Mode { + case connectivity.ServingModeServing: + s.logger.Errorf("Listener %q entering mode: %q", addr.String(), args.Mode) + case connectivity.ServingModeNotServing: + s.logger.Errorf("Listener %q entering mode: %q due to error: %v", addr.String(), args.Mode, args.Err) + } +} + +// RegisterService registers a service and its implementation to the underlying +// gRPC server. It is called from the IDL generated code. This must be called +// before invoking Serve. 
+func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss any) { + s.gs.RegisterService(sd, ss) +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + return s.gs.GetServiceInfo() +} + +// Serve gets the underlying gRPC server to accept incoming connections on the +// listener lis, which is expected to be listening on a TCP port. +// +// A connection to the management server, to receive xDS configuration, is +// initiated here. +// +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *GRPCServer) Serve(lis net.Listener) error { + s.logger.Infof("Serve() passed a net.Listener on %s", lis.Addr().String()) + if _, ok := lis.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("xds: GRPCServer expects listener to return a net.TCPAddr. Got %T", lis.Addr()) + } + + if s.quit.HasFired() { + return grpc.ErrServerStopped + } + + // The server listener resource name template from the bootstrap + // configuration contains a template for the name of the Listener resource + // to subscribe to for a gRPC server. If the token `%s` is present in the + // string, it will be replaced with the server's listening "IP:port" (e.g., + // "0.0.0.0:8080", "[::]:8080"). + cfg := s.xdsC.BootstrapConfig() + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String()) + + // Create a listenerWrapper which handles all functionality required by + // this particular instance of Serve(). + lw := server.NewListenerWrapper(server.ListenerWrapperParams{ + Listener: lis, + ListenerResourceName: name, + XDSClient: s.xdsC, + ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) { + s.opts.modeCallback(addr, ServingModeChangeArgs{ + Mode: mode, + Err: err, + }) + }, + }) + return s.gs.Serve(lw) +} + +// Stop stops the underlying gRPC server. It immediately closes all open +// connections. It cancels all active RPCs on the server side and the +// corresponding pending RPCs on the client side will get notified by connection +// errors. +func (s *GRPCServer) Stop() { + s.quit.Fire() + s.gs.Stop() + if s.xdsC != nil { + s.xdsClientClose() + } +} + +// GracefulStop stops the underlying gRPC server gracefully. It stops the server +// from accepting new connections and RPCs and blocks until all the pending RPCs +// are finished. +func (s *GRPCServer) GracefulStop() { + s.quit.Fire() + s.gs.GracefulStop() + if s.xdsC != nil { + s.xdsClientClose() + } +} + +// routeAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured. +func routeAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(interface { + UsableRouteConfiguration() xdsresource.UsableRouteConfiguration + }) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + + rc := cw.UsableRouteConfiguration() + // Error out at routing l7 level with a status code UNAVAILABLE, represents + // an nack before usable route configuration or resource not found for RDS + // or error combining LDS + RDS (Shouldn't happen). 
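As the comment above notes, RPCs that arrive before a usable route configuration exists, or that fail to match a NonForwardingAction route, are rejected rather than forwarded. The standalone sketch below shows how a caller might interpret the resulting status codes; the invokeRPC placeholder stands in for a hypothetical generated client stub.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classifyXDSRejection reports how an RPC error from an xDS-enabled server
// can be interpreted, mirroring the status codes routeAndProcess returns.
func classifyXDSRejection(err error) string {
	switch status.Code(err) {
	case codes.OK:
		return "accepted"
	case codes.Unavailable:
		// No usable route configuration, no matching virtual host or route,
		// or the matched route was not a NonForwardingAction route.
		return "rejected by server-side xDS routing"
	case codes.PermissionDenied:
		// An HTTP filter (for example RBAC) denied the RPC.
		return "denied by an HTTP filter"
	default:
		return "failed for another reason"
	}
}

func main() {
	err := invokeRPC(context.Background())
	log.Println(classifyXDSRejection(err))
}

// invokeRPC is a placeholder; a real application would call a generated stub.
func invokeRPC(ctx context.Context) error {
	return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route")
}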
+ if rc.Err != nil { + if logger.V(2) { + logger.Infof("RPC on connection with xDS Configuration error: %v", rc.Err) + } + return status.Error(codes.Unavailable, "error from xDS configuration for matched route configuration") + } + + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. + authority := md.Get(":authority") + vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], rc.VHS) + if vh == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *xdsresource.RouteWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.Routes { + if r.M.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes + // RPCs matching that route to fail with UNAVAILABLE." - A36 + if r.ActionType != xdsresource.RouteActionNonForwardingAction { + return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.Interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return status.Errorf(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + +// xdsUnaryInterceptor is the unary interceptor added to the gRPC server to +// perform any xDS specific functionality on unary RPCs. +func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } + return handler(ctx, req) +} + +// xdsStreamInterceptor is the stream interceptor added to the gRPC server to +// perform any xDS specific functionality on streaming RPCs. +func xdsStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } + return handler(srv, ss) +} diff --git a/vendor/google.golang.org/grpc/xds/server_options.go b/vendor/google.golang.org/grpc/xds/server_options.go new file mode 100644 index 000000000..9b9700cf3 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/server_options.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds + +import ( + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" +) + +type serverOptions struct { + modeCallback ServingModeCallbackFunc + bootstrapContentsForTesting []byte +} + +type serverOption struct { + grpc.EmptyServerOption + apply func(*serverOptions) +} + +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. +func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} +} + +// ServingModeCallbackFunc is the callback that users can register to get +// notified about the server's serving mode changes. The callback is invoked +// with the address of the listener and its new mode. +// +// Users must not perform any blocking operations in this callback. +type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) + +// ServingModeChangeArgs wraps the arguments passed to the serving mode callback +// function. +type ServingModeChangeArgs struct { + // Mode is the new serving mode of the server listener. + Mode connectivity.ServingMode + // Err is set to a non-nil error if the server has transitioned into + // not-serving mode. + Err error +} + +// BootstrapContentsForTesting returns a grpc.ServerOption which allows users +// to inject a bootstrap configuration used by only this server, instead of the +// global configuration from the environment variables. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContentsForTesting = contents }} +} diff --git a/vendor/google.golang.org/grpc/xds/xds.go b/vendor/google.golang.org/grpc/xds/xds.go new file mode 100644 index 000000000..943d09f17 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/xds.go @@ -0,0 +1,99 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds contains an implementation of the xDS suite of protocols, to be +// used by gRPC client and server applications. +// +// On the client-side, users simply need to import this package to get all xDS +// functionality. On the server-side, users need to use the GRPCServer type +// exported by this package instead of the regular grpc.Server. +// +// See https://github.com/grpc/grpc-go/tree/master/examples/features/xds for +// example. 
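The package comment above summarizes the two entry points: on the client side an application only needs a blank import of this package so that the xds resolver, balancers and filters register themselves, after which an xds:/// target can be dialed. A minimal sketch, assuming a valid bootstrap configuration and a hypothetical target name:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/xds" // registers the xds resolver, balancers and filters
)

func main() {
	// "xds:///greeter-service" is a hypothetical target; the management server
	// named in the GRPC_XDS_BOOTSTRAP file decides where it resolves.
	conn, err := grpc.NewClient("xds:///greeter-service",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient() failed: %v", err)
	}
	defer conn.Close()
	// Use conn with a generated client stub as usual.
}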
+package xds + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/csds" + + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter" // Register the xDS LB Registry Converters. + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +var logger = grpclog.Component("xds") + +func init() { + internaladmin.AddService(func(registrar grpc.ServiceRegistrar) (func(), error) { + var grpcServer *grpc.Server + switch ss := registrar.(type) { + case *grpc.Server: + grpcServer = ss + case *GRPCServer: + sss, ok := ss.gs.(*grpc.Server) + if !ok { + logger.Warning("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") + return nil, nil + } + grpcServer = sss + default: + // Returning an error would cause the top level admin.Register() to + // fail. Log a warning instead. + logger.Error("Server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") + return nil, nil + } + + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + return nil, fmt.Errorf("failed to create csds server: %v", err) + } + v3statusgrpc.RegisterClientStatusDiscoveryServiceServer(grpcServer, csdss) + return csdss.Close, nil + }) +} + +// NewXDSResolverWithConfigForTesting creates a new xDS resolver builder using +// the provided xDS bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))(bootstrapConfig) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b..8f9e592f8 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. 
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df22..0e72d8537 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c8..024ffebd3 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d..08dad7692 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. 
package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b..fa790e0ff 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b0..d2f549497 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8d..67a51b327 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b..fd4d0c83d 100644 --- 
a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd0121..d9b9d916a 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b4..7f67cbb6e 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff..bef5a25fb 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85..9404270de 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20..0d5b546e0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44..7c1f66c8c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb73..78be9df34 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23..077712c2c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a..f72ddd882 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd21224..6254f5de4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 000000000..9f6c32a7d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee6..b6849d669 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d45..741b5ed29 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a2..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. 
-func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() 
*map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8..79e186667 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f3338..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd09..832a7988f 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba..1ffddf687 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f686..fb8e15e8d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03..c36d4a9cd 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f2928..78445d116 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. 
// It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 856175542..ebcb4a8ab 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda..002e0047a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6eb..742cb518c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. 
This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead..0015fcb35 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990..479527b58 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d8..246156561 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb075..6dea75cd5 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c5..c7e860fcd 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d3..87da199a3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd91..b99d4d241 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb82..1761bc9c6 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -55,11 +55,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6..19de8d371 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := 
v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc..8f206a661 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,19 +295,20 @@ type Value struct { // NewValue constructs a Value from a general-purpose Go interface. // -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
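Editorial note, not part of the vendored diff: the doc-comment table in the hunk above now lists json.Number and the narrower integer widths as accepted inputs to structpb.NewValue. The following is a minimal usage sketch under the assumption that the vendored google.golang.org/protobuf from this bump is on the import path; the implementing switch cases appear in the next hunk.

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// json.Number is converted to a NumberValue via its Float64() form
	// (an error is returned if it does not parse as a float64).
	v, err := structpb.NewValue(json.Number("42.5"))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetNumberValue()) // 42.5

	// int8/int16/uint8/uint16 are likewise stored as NumberValue after this bump.
	w, err := structpb.NewValue(int8(7))
	if err != nil {
		panic(err)
	}
	fmt.Println(w.GetNumberValue()) // 7
}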
@@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } 
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b..0d20722d7 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826a..006060e56 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,11 +123,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if 
protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,11 +447,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - 
switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go index 68eca0624..4ac01cc6f 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -1102,138 +1102,138 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2095 bytes of a gzipped FileDescriptorProto + // 2085 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, - 0xdd, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, - 0x74, 0xb7, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, - 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xc9, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, - 0xcf, 0x9d, 0xd9, 0xcc, 0x46, 0x91, 0x45, 0x6f, 0x9e, 0xdf, 0xf7, 0xfb, 0x7d, 0xbf, 0xb7, 0x86, - 0x8b, 0x87, 0x97, 0xbd, 0x86, 0x61, 0x37, 0x89, 0x63, 0x34, 0x5d, 0xea, 0xd9, 0xbe, 0xab, 0xd3, - 0xe6, 0xd1, 0x25, 0x62, 0x3a, 0x07, 0xe4, 0xcd, 0xe6, 0x3e, 0xb5, 0xa8, 0x4b, 0x18, 0x6d, 0x37, - 0x1c, 0xd7, 0x66, 0x36, 0x7a, 0x39, 0xa0, 0x6e, 0x10, 0xc7, 0x68, 0x84, 0xd4, 0x8d, 0x90, 0x7a, - 0xe5, 0xf5, 0x7d, 0x83, 0x1d, 0xf8, 0xbb, 0x0d, 0xdd, 0xee, 0x34, 0xf7, 0xed, 0x7d, 0xbb, 0x29, - 0x98, 0x76, 0xfd, 0x3d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x81, 0xb0, 0x15, 0x35, 0xa1, 0x5a, 0xb7, - 0x5d, 0xae, 0x36, 0xab, 0x70, 0xe5, 0xad, 0x98, 0xa6, 0x43, 0xf4, 0x03, 0xc3, 0xa2, 0xee, 0x71, - 0xd3, 0x39, 0xdc, 0x4f, 0xdb, 0x3b, 0x0c, 0x97, 0xd7, 0xec, 0x50, 0x46, 0xf2, 0x74, 0x35, 0x8b, - 0xb8, 0x5c, 0xdf, 0x62, 0x46, 0xa7, 0x5f, 0xcd, 0xdb, 0xcf, 0x62, 0xf0, 0xf4, 0x03, 0xda, 0x21, - 0x59, 0x3e, 0xf5, 0xbf, 0x0a, 0x2c, 0xac, 0x9b, 0xa6, 0xad, 0x13, 0x66, 0xd8, 0x16, 0xa6, 0x9e, - 0x6f, 0x32, 0xf4, 0x23, 0x98, 0x68, 0xd3, 0x23, 0x43, 0xa7, 0xde, 0xb2, 0x72, 0x4e, 0x59, 0x9d, - 
0x5e, 0x7b, 0xab, 0x31, 0xc8, 0xd9, 0x8d, 0x4d, 0x41, 0x9c, 0x15, 0xa3, 0xcd, 0x3f, 0xec, 0xd6, - 0x47, 0x7a, 0xdd, 0xfa, 0x44, 0x80, 0xf7, 0x70, 0x28, 0x15, 0xdd, 0x81, 0x19, 0xcb, 0x6e, 0xd3, - 0x16, 0x35, 0xa9, 0xce, 0x6c, 0x77, 0xb9, 0x2a, 0xb4, 0x9c, 0x4b, 0x6a, 0xe1, 0x51, 0x68, 0x1c, - 0x5d, 0x6a, 0xdc, 0x48, 0xd0, 0x69, 0x0b, 0xbd, 0x6e, 0x7d, 0x26, 0x09, 0xc1, 0x29, 0x39, 0x68, - 0x0d, 0x40, 0xb7, 0x2d, 0xe6, 0xda, 0xa6, 0x49, 0xdd, 0xe5, 0xd1, 0x73, 0xca, 0xea, 0x94, 0x86, - 0xa4, 0x15, 0xb0, 0x11, 0x61, 0x70, 0x82, 0x4a, 0x7d, 0x5c, 0x85, 0x69, 0x8d, 0x78, 0x86, 0x1e, - 0x58, 0x89, 0x7e, 0x06, 0x40, 0x18, 0x73, 0x8d, 0x5d, 0x9f, 0x89, 0xf3, 0x57, 0x57, 0xa7, 0xd7, - 0xbe, 0x35, 0xf8, 0xfc, 0x09, 0xf6, 0xc6, 0x7a, 0xc4, 0xbb, 0x65, 0x31, 0xf7, 0x58, 0x7b, 0x25, - 0x54, 0x1f, 0x23, 0x7e, 0xfe, 0xaf, 0xfa, 0xec, 0x2d, 0x9f, 0x98, 0xc6, 0x9e, 0x41, 0xdb, 0x37, - 0x48, 0x87, 0xe2, 0x84, 0x46, 0x74, 0x04, 0x93, 0x3a, 0x71, 0x88, 0x6e, 0xb0, 0xe3, 0xe5, 0x8a, - 0xd0, 0xfe, 0x4e, 0x79, 0xed, 0x1b, 0x92, 0x33, 0xd0, 0x7d, 0x5e, 0xea, 0x9e, 0x0c, 0xc1, 0xfd, - 0x9a, 0x23, 0x5d, 0x2b, 0x26, 0xcc, 0x67, 0x6c, 0x47, 0x0b, 0x50, 0x3d, 0xa4, 0xc7, 0x22, 0x07, - 0xa6, 0x30, 0xff, 0x13, 0x6d, 0xc0, 0xd8, 0x11, 0x31, 0x7d, 0xba, 0x5c, 0x11, 0x11, 0x7b, 0xbd, - 0x54, 0x5e, 0x84, 0x52, 0x71, 0xc0, 0xfb, 0x6e, 0xe5, 0xb2, 0xb2, 0x72, 0x08, 0xb3, 0x29, 0x5b, - 0x73, 0x74, 0x6d, 0xa6, 0x75, 0x35, 0x12, 0xba, 0xa2, 0x14, 0x6f, 0x38, 0x87, 0xfb, 0x69, 0xe5, - 0xb7, 0x7c, 0x62, 0x31, 0x83, 0x1d, 0x27, 0x94, 0xa9, 0x57, 0x60, 0x71, 0x63, 0xeb, 0x5a, 0x60, - 0x4d, 0x32, 0x57, 0xe8, 0x3d, 0xc7, 0xa5, 0x9e, 0x67, 0xd8, 0x56, 0xa0, 0x37, 0xce, 0x95, 0xad, - 0x08, 0x83, 0x13, 0x54, 0xea, 0x11, 0x8c, 0xcb, 0x2c, 0x39, 0x07, 0xa3, 0x16, 0xe9, 0x50, 0xc9, - 0x37, 0x23, 0xf9, 0x46, 0x85, 0x4f, 0x05, 0x06, 0x5d, 0x85, 0xb1, 0x5d, 0x1e, 0x19, 0x69, 0xfe, - 0x85, 0xd2, 0x41, 0xd4, 0xa6, 0x7a, 0xdd, 0xfa, 0x98, 0x00, 0xe0, 0x40, 0x84, 0xfa, 0xa0, 0x02, - 0x67, 0xb3, 0x45, 0xb6, 0x61, 0x5b, 0x7b, 0xc6, 0xbe, 0xef, 0x8a, 0x0f, 0xf4, 0x5d, 0x18, 0x0f, - 0x44, 0x4a, 0x8b, 0x56, 0xa5, 0x45, 0xe3, 0x2d, 0x01, 0x7d, 0xda, 0xad, 0x9f, 0xc9, 0xb2, 0x06, - 0x18, 0x2c, 0xf9, 0xd0, 0x2a, 0x4c, 0xba, 0xf4, 0x53, 0x9f, 0x7a, 0xcc, 0x13, 0x79, 0x37, 0xa5, - 0xcd, 0xf0, 0xd4, 0xc1, 0x12, 0x86, 0x23, 0x2c, 0xba, 0xaf, 0xc0, 0x52, 0x50, 0xc9, 0x29, 0x1b, - 0x64, 0x15, 0x5f, 0x2a, 0x93, 0x13, 0x29, 0x46, 0xed, 0xab, 0xd2, 0xd8, 0xa5, 0x1c, 0x24, 0xce, - 0x53, 0xa5, 0xfe, 0x47, 0x81, 0x33, 0xf9, 0x5d, 0x07, 0xed, 0xc1, 0x84, 0x2b, 0xfe, 0x0a, 0x8b, - 0xf7, 0xbd, 0x32, 0x06, 0xc9, 0x63, 0x16, 0xf7, 0xb0, 0xe0, 0xdb, 0xc3, 0xa1, 0x70, 0xa4, 0xc3, - 0xb8, 0x2e, 0x6c, 0x92, 0x55, 0xfa, 0xde, 0x70, 0x3d, 0x32, 0xed, 0x81, 0xb9, 0x30, 0x5c, 0x01, - 0x18, 0x4b, 0xd1, 0xea, 0x6f, 0x15, 0x98, 0xcf, 0x54, 0x11, 0xaa, 0x41, 0xd5, 0xb0, 0x98, 0x48, - 0xab, 0x6a, 0x10, 0xa3, 0x6d, 0x8b, 0xdd, 0xe1, 0xc9, 0x8e, 0x39, 0x02, 0x9d, 0x87, 0xd1, 0x5d, - 0xdb, 0x36, 0x45, 0x38, 0x26, 0xb5, 0xd9, 0x5e, 0xb7, 0x3e, 0xa5, 0xd9, 0xb6, 0x19, 0x50, 0x08, - 0x14, 0xfa, 0x06, 0x8c, 0x7b, 0xcc, 0x35, 0xac, 0x7d, 0xd9, 0x23, 0xe7, 0x7b, 0xdd, 0xfa, 0x74, - 0x4b, 0x40, 0x02, 0x32, 0x89, 0x46, 0xaf, 0xc2, 0xc4, 0x11, 0x75, 0x45, 0x85, 0x8c, 0x09, 0x4a, - 0xd1, 0x81, 0xef, 0x04, 0xa0, 0x80, 0x34, 0x24, 0x50, 0x7f, 0x5f, 0x81, 0x69, 0x19, 0x40, 0x93, - 0x18, 0x1d, 0x74, 0x37, 0x91, 0x50, 0x41, 0x24, 0x5e, 0x1b, 0x22, 0x12, 0xda, 0x42, 0xd8, 0xbc, - 0x72, 0x32, 0x90, 0xc2, 0xb4, 0x6e, 0x5b, 0x1e, 0x73, 0x89, 0x61, 0xc9, 0x74, 0x4d, 0x37, 0x88, - 0x41, 0x89, 0x27, 0xd9, 
0xb4, 0x25, 0xa9, 0x60, 0x3a, 0x86, 0x79, 0x38, 0x29, 0x17, 0x7d, 0x1c, - 0x85, 0xb8, 0x2a, 0x34, 0xbc, 0x5d, 0x4a, 0x03, 0x3f, 0x7c, 0xb9, 0xe8, 0xfe, 0x4d, 0x81, 0xe5, - 0x22, 0xa6, 0x54, 0x3d, 0x2a, 0xcf, 0x55, 0x8f, 0x95, 0x93, 0xab, 0xc7, 0x3f, 0x2b, 0x89, 0xd8, - 0x7b, 0x1e, 0xfa, 0x04, 0x26, 0xf9, 0x6a, 0xd3, 0x26, 0x8c, 0xc8, 0x15, 0xe2, 0x8d, 0x41, 0xed, - 0xdb, 0x6b, 0x70, 0x6a, 0x3e, 0xee, 0x6f, 0xee, 0xfe, 0x98, 0xea, 0xec, 0x3a, 0x65, 0x24, 0x6e, - 0xc6, 0x31, 0x0c, 0x47, 0x52, 0xd1, 0x4d, 0x18, 0xf5, 0x1c, 0xaa, 0x0f, 0x33, 0x88, 0x84, 0x69, - 0x2d, 0x87, 0xea, 0x71, 0xbf, 0xe6, 0x5f, 0x58, 0x08, 0x52, 0x7f, 0x95, 0x0c, 0x86, 0xe7, 0xa5, - 0x83, 0x51, 0xe4, 0x62, 0xe5, 0xe4, 0x5c, 0xfc, 0x79, 0xd4, 0x0a, 0x84, 0x7d, 0xd7, 0x0c, 0x8f, - 0xa1, 0x8f, 0xfa, 0xdc, 0xdc, 0x28, 0xe7, 0x66, 0xce, 0x2d, 0x9c, 0x1c, 0x55, 0x59, 0x08, 0x49, - 0xb8, 0xf8, 0x06, 0x8c, 0x19, 0x8c, 0x76, 0xc2, 0xfa, 0xba, 0x50, 0xda, 0xc7, 0xda, 0xac, 0x94, - 0x3a, 0xb6, 0xcd, 0xf9, 0x71, 0x20, 0x46, 0xfd, 0x5d, 0x25, 0x75, 0x02, 0xee, 0x7b, 0xf4, 0x43, - 0x98, 0xf2, 0xe4, 0x44, 0x0e, 0xbb, 0xc4, 0xc5, 0x32, 0x7a, 0xa2, 0x95, 0x70, 0x51, 0xaa, 0x9a, - 0x0a, 0x21, 0x1e, 0x8e, 0x25, 0x26, 0x2a, 0xb8, 0x32, 0x54, 0x05, 0x67, 0xe2, 0x5f, 0x54, 0xc1, - 0xe8, 0x2e, 0xcc, 0x7a, 0xbe, 0xc1, 0xc8, 0xae, 0x49, 0xf9, 0x5a, 0xea, 0x95, 0xde, 0x64, 0x17, - 0x7b, 0xdd, 0xfa, 0x6c, 0x2b, 0xc9, 0x8a, 0xd3, 0x92, 0x54, 0x17, 0xf2, 0x72, 0x03, 0xfd, 0x00, - 0xc6, 0x6d, 0x87, 0x7c, 0xea, 0x53, 0x19, 0xf0, 0x67, 0x2c, 0x87, 0x37, 0x05, 0x6d, 0x5e, 0x06, - 0x02, 0x3f, 0x4e, 0x80, 0xc6, 0x52, 0xa4, 0xfa, 0x40, 0x81, 0x85, 0x6c, 0x9f, 0x1c, 0xa2, 0x11, - 0xed, 0xc0, 0x5c, 0x87, 0x30, 0xfd, 0x20, 0x9a, 0x55, 0xa2, 0x3a, 0xa7, 0xb4, 0xd5, 0x5e, 0xb7, - 0x3e, 0x77, 0x3d, 0x85, 0x79, 0xda, 0xad, 0xa3, 0xf7, 0x7d, 0xd3, 0x3c, 0x4e, 0xaf, 0xa3, 0x19, - 0x7e, 0xf5, 0x17, 0x55, 0x98, 0x4d, 0x8d, 0x85, 0x12, 0x8b, 0xd7, 0x3a, 0xcc, 0xb7, 0xe3, 0x38, - 0x72, 0x84, 0x34, 0xe3, 0x2b, 0x92, 0x38, 0x99, 0x84, 0x82, 0x2f, 0x4b, 0x9f, 0xce, 0xca, 0xea, - 0x0b, 0xcf, 0xca, 0x3b, 0x30, 0x47, 0xa2, 0x45, 0xe0, 0xba, 0xdd, 0xa6, 0x72, 0x0c, 0x37, 0x24, - 0xd7, 0xdc, 0x7a, 0x0a, 0xfb, 0xb4, 0x5b, 0x3f, 0x95, 0x5d, 0x1f, 0x38, 0x1c, 0x67, 0xa4, 0xa0, - 0x57, 0x60, 0x4c, 0xb7, 0x7d, 0x8b, 0x89, 0x59, 0x5d, 0x8d, 0xab, 0x70, 0x83, 0x03, 0x71, 0x80, - 0x43, 0xdf, 0x84, 0x69, 0xd2, 0xee, 0x18, 0xd6, 0xba, 0xae, 0x53, 0xcf, 0x5b, 0x1e, 0x17, 0x5b, - 0x42, 0x34, 0x0b, 0xd7, 0x63, 0x14, 0x4e, 0xd2, 0xa9, 0x7f, 0x52, 0xc2, 0x15, 0xb4, 0x60, 0x55, - 0x42, 0x17, 0xf8, 0xe2, 0x25, 0x50, 0x32, 0x38, 0x89, 0xdd, 0x49, 0x80, 0x71, 0x88, 0x47, 0x5f, - 0x87, 0xf1, 0xb6, 0x6b, 0x1c, 0x51, 0x57, 0x46, 0x26, 0x2a, 0xaf, 0x4d, 0x01, 0xc5, 0x12, 0xcb, - 0x83, 0xed, 0x84, 0xab, 0x4c, 0x22, 0xd8, 0x3b, 0xb6, 0x6d, 0x62, 0x81, 0x11, 0x92, 0x84, 0x55, - 0xd2, 0x85, 0xb1, 0xa4, 0xc0, 0x56, 0x89, 0x55, 0x3f, 0x82, 0xb9, 0xcc, 0xfe, 0x7f, 0x15, 0xaa, - 0x3a, 0x35, 0x65, 0x15, 0x35, 0x07, 0x47, 0xb7, 0xef, 0xf6, 0xa0, 0x4d, 0xf4, 0xba, 0xf5, 0xea, - 0xc6, 0xd6, 0x35, 0xcc, 0x85, 0xa8, 0xbf, 0x51, 0xe0, 0xa5, 0xc2, 0x4a, 0x4b, 0x9c, 0x56, 0x19, - 0x78, 0x5a, 0x02, 0xe0, 0x10, 0x97, 0x74, 0x28, 0xa3, 0xae, 0x97, 0x33, 0xd8, 0xd2, 0xfd, 0x5c, - 0x5e, 0xec, 0x1b, 0x98, 0xfc, 0x64, 0xeb, 0x1e, 0xa3, 0x16, 0xdf, 0xc1, 0xe2, 0x99, 0xb9, 0x13, - 0x09, 0xc2, 0x09, 0xa1, 0xea, 0x1f, 0x2b, 0x70, 0x6a, 0xc7, 0x6e, 0xb7, 0xf4, 0x03, 0xda, 0xf6, - 0x4d, 0xc3, 0xda, 0xe7, 0x97, 0x62, 0x7a, 0x8f, 0x9d, 0xc0, 0xc0, 0xfe, 0x30, 0x35, 0xb0, 0x9f, - 0xd1, 0x88, 0xf3, 0x6c, 0x2c, 0x9a, 0xdc, 0xe8, 
0x13, 0xbe, 0xcd, 0x12, 0xe6, 0x87, 0xdd, 0xf7, - 0xf2, 0x73, 0xc8, 0x16, 0xfc, 0x71, 0x64, 0x82, 0x6f, 0x2c, 0xe5, 0xaa, 0x7f, 0x57, 0x60, 0x39, - 0x8f, 0xed, 0x04, 0x86, 0xf0, 0xf7, 0xd2, 0x43, 0x78, 0x6d, 0xf8, 0xb3, 0x15, 0x4c, 0xe3, 0xcf, - 0x0a, 0xce, 0x24, 0xc6, 0xf2, 0x65, 0x98, 0x09, 0xda, 0x15, 0x6d, 0xf3, 0x69, 0x24, 0x13, 0xf7, - 0x94, 0x14, 0x34, 0xd3, 0x4a, 0xe0, 0x70, 0x8a, 0x12, 0xbd, 0x0b, 0x73, 0x8e, 0xcd, 0xa8, 0xc5, - 0x0c, 0x62, 0x06, 0x23, 0x31, 0xb8, 0x4c, 0x22, 0xde, 0xd7, 0x76, 0x52, 0x18, 0x9c, 0xa1, 0x54, - 0x7f, 0xa9, 0xc0, 0x4a, 0x71, 0x74, 0xd0, 0x4f, 0x61, 0x2e, 0x3c, 0xb1, 0xd8, 0x97, 0x4b, 0x5e, - 0xf0, 0x70, 0x92, 0x27, 0x96, 0x2d, 0x43, 0x7e, 0x26, 0xec, 0xb9, 0x29, 0x32, 0x0f, 0x67, 0x54, - 0xa9, 0xbf, 0xae, 0xc0, 0x6c, 0x8a, 0xe4, 0x04, 0x4a, 0xe6, 0x56, 0xaa, 0x64, 0x9a, 0xc3, 0x1c, - 0xb3, 0xa8, 0x56, 0xee, 0x66, 0x6a, 0xe5, 0xd2, 0x30, 0x42, 0x07, 0x17, 0x49, 0x4f, 0x81, 0x5a, - 0x8a, 0x9e, 0xef, 0x10, 0x7e, 0x87, 0xba, 0x98, 0xee, 0x51, 0x97, 0x5a, 0x3a, 0x45, 0x17, 0x61, - 0x92, 0x38, 0xc6, 0x15, 0xd7, 0xf6, 0x1d, 0x99, 0x52, 0x51, 0xea, 0xaf, 0xef, 0x6c, 0x0b, 0x38, - 0x8e, 0x28, 0x38, 0x75, 0x68, 0x91, 0x9c, 0x00, 0x89, 0x3b, 0x61, 0x00, 0xc7, 0x11, 0x45, 0xb4, - 0x18, 0x8c, 0x16, 0x2e, 0x06, 0x1a, 0x54, 0x7d, 0xa3, 0x2d, 0x2f, 0xb2, 0x6f, 0x48, 0x82, 0xea, - 0xed, 0xed, 0xcd, 0xa7, 0xdd, 0xfa, 0xf9, 0xa2, 0xf7, 0x53, 0x76, 0xec, 0x50, 0xaf, 0x71, 0x7b, - 0x7b, 0x13, 0x73, 0x66, 0xf5, 0x2f, 0x0a, 0x2c, 0xa6, 0x0e, 0x79, 0x02, 0x2d, 0x60, 0x27, 0xdd, - 0x02, 0x5e, 0x1b, 0x22, 0x64, 0x05, 0xb5, 0x7f, 0x5f, 0x81, 0xb3, 0x03, 0xcb, 0xa2, 0xc4, 0x9a, - 0xf5, 0x1d, 0x98, 0xf7, 0xad, 0xf4, 0xf2, 0x1b, 0x54, 0xfa, 0x12, 0x5f, 0xb1, 0x6e, 0xa7, 0x51, - 0x38, 0x4b, 0xcb, 0xaf, 0x5b, 0x8b, 0x7d, 0x29, 0x8b, 0x3e, 0xc8, 0xbe, 0x3c, 0x5f, 0x28, 0x7d, - 0xe5, 0x1e, 0xf0, 0xdc, 0x9c, 0x7e, 0x16, 0xae, 0x94, 0x7a, 0x16, 0xfe, 0xbc, 0x02, 0x4b, 0x39, - 0xd9, 0x8f, 0x3e, 0x06, 0x88, 0xb7, 0xae, 0x9c, 0x60, 0xe7, 0x18, 0xd9, 0xf7, 0xa8, 0x34, 0x27, - 0xde, 0x83, 0x63, 0x68, 0x42, 0x22, 0xf2, 0x60, 0xda, 0xa5, 0x1e, 0x75, 0x8f, 0x68, 0xfb, 0x7d, - 0xdb, 0x95, 0x21, 0xff, 0xf6, 0x10, 0x21, 0xef, 0xab, 0xba, 0x78, 0xb9, 0xc3, 0xb1, 0x60, 0x9c, - 0xd4, 0x82, 0x5a, 0x70, 0xba, 0x4d, 0x49, 0xc2, 0x4c, 0xb1, 0xa6, 0xd1, 0xb6, 0x7c, 0x43, 0x3a, - 0x2b, 0x05, 0x9c, 0xde, 0xcc, 0x23, 0xc2, 0xf9, 0xbc, 0xea, 0x3f, 0x15, 0x38, 0x9d, 0xb2, 0xec, - 0x03, 0xda, 0x71, 0x4c, 0xc2, 0xe8, 0x09, 0x74, 0xce, 0xbb, 0xa9, 0xce, 0xf9, 0xce, 0x10, 0xee, - 0x0b, 0x8d, 0x2c, 0x7c, 0x27, 0xf8, 0x87, 0x02, 0x2f, 0xe5, 0x72, 0x9c, 0x40, 0x27, 0xf8, 0x30, - 0xdd, 0x09, 0xde, 0x7c, 0x8e, 0x73, 0x15, 0x74, 0x84, 0x47, 0x45, 0xa7, 0x6a, 0x05, 0x1b, 0xd6, - 0x97, 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x26, 0xa4, 0xe4, 0x37, 0x86, 0x12, 0x3d, 0x6d, 0x0d, - 0x40, 0xfe, 0x40, 0x16, 0xbe, 0x9f, 0x55, 0x63, 0xbb, 0xaf, 0x44, 0x18, 0x9c, 0xa0, 0x42, 0x57, - 0x01, 0x85, 0x16, 0xb6, 0x4c, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x55, 0xf0, 0xae, 0x48, 0x5e, 0x84, - 0xfb, 0x28, 0x70, 0x0e, 0x97, 0xfa, 0x57, 0x25, 0x5e, 0x32, 0x04, 0xf8, 0xff, 0xd5, 0xf3, 0xc2, - 0xb8, 0x42, 0xcf, 0x27, 0x87, 0xa4, 0xa0, 0x0c, 0x4b, 0xc3, 0x94, 0x29, 0xfd, 0xe2, 0x4a, 0x23, - 0x94, 0xf8, 0x9c, 0x43, 0x52, 0x58, 0x57, 0x50, 0x12, 0x0f, 0xaa, 0x99, 0x53, 0x88, 0x52, 0x28, - 0x7b, 0x99, 0xbb, 0x26, 0xaf, 0xae, 0x81, 0x5b, 0x5f, 0x2d, 0x67, 0x0e, 0x4f, 0xd3, 0xdc, 0x6b, - 0xee, 0x45, 0x98, 0xb4, 0xec, 0x36, 0x15, 0x8f, 0x19, 0x99, 0x55, 0xe8, 0x86, 0x84, 0xe3, 0x88, - 0xa2, 0xef, 0xe7, 0xd5, 0xd1, 0x17, 0xf4, 0xf3, 0x2a, 0x5f, 0xdf, 0x4c, 
0xb9, 0xd5, 0x8f, 0x89, - 0xc9, 0x10, 0xaf, 0x6f, 0x12, 0x8e, 0x23, 0x0a, 0x74, 0x33, 0x9e, 0xe5, 0xe3, 0x22, 0x26, 0x5f, - 0x2b, 0x33, 0xcb, 0x8b, 0xc7, 0xb8, 0xa6, 0x3d, 0x7c, 0x52, 0x1b, 0x79, 0xf4, 0xa4, 0x36, 0xf2, - 0xc5, 0x93, 0xda, 0xc8, 0xfd, 0x5e, 0x4d, 0x79, 0xd8, 0xab, 0x29, 0x8f, 0x7a, 0x35, 0xe5, 0x8b, - 0x5e, 0x4d, 0x79, 0xdc, 0xab, 0x29, 0x9f, 0xfd, 0xbb, 0x36, 0xf2, 0xfd, 0x97, 0x07, 0xfd, 0x17, - 0xc1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x8f, 0x9b, 0x77, 0x64, 0x20, 0x00, 0x00, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 
0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 
0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 
0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index 35a7fbafa..b4428ad45 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -843,7 +843,7 @@ message ResourceSlice { message ResourceSliceList { // Standard list metadata // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta listMeta = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of resource ResourceSlices. repeated ResourceSlice items = 2; diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index 298d8d107..4efd2491d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -290,7 +290,7 @@ type ResourceSliceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional - metav1.ListMeta `json:"listMeta" protobuf:"bytes,1,opt,name=listMeta"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of resource ResourceSlices. 
Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 8154c99ce..1a44a971d 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (ResourceSlice) SwaggerDoc() map[string]string { var map_ResourceSliceList = map[string]string{ "": "ResourceSliceList is a collection of ResourceSlices.", - "listMeta": "Standard list metadata", + "metadata": "Standard list metadata", "items": "Items is the list of resource ResourceSlices.", } diff --git a/vendor/modules.txt b/vendor/modules.txt index 8740055b5..c0dc3dac3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,14 @@ -# cloud.google.com/go v0.115.1 -## explicit; go 1.20 +# cel.dev/expr v0.16.1 +## explicit; go 1.18 +cel.dev/expr +# cloud.google.com/go v0.116.0 +## explicit; go 1.21 cloud.google.com/go cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.9.1 +# cloud.google.com/go/auth v0.10.0 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -17,41 +20,48 @@ cloud.google.com/go/auth/credentials/internal/stsexchange cloud.google.com/go/auth/grpctransport cloud.google.com/go/auth/httptransport cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.4 -## explicit; go 1.20 +# cloud.google.com/go/auth/oauth2adapt v0.2.5 +## explicit; go 1.21 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.0 -## explicit; go 1.20 +# cloud.google.com/go/compute/metadata v0.5.2 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# cloud.google.com/go/firestore v1.16.0 -## explicit; go 1.20 +# cloud.google.com/go/firestore v1.17.0 +## explicit; go 1.21 cloud.google.com/go/firestore/apiv1 cloud.google.com/go/firestore/apiv1/firestorepb cloud.google.com/go/firestore/internal -# cloud.google.com/go/iam v1.1.13 -## explicit; go 1.20 +# cloud.google.com/go/iam v1.2.1 +## explicit; go 1.21 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb cloud.google.com/go/iam/credentials/apiv1 cloud.google.com/go/iam/credentials/apiv1/credentialspb cloud.google.com/go/iam/internal -# cloud.google.com/go/kms v1.18.5 -## explicit; go 1.20 +# cloud.google.com/go/kms v1.20.0 +## explicit; go 1.21 cloud.google.com/go/kms/apiv1 cloud.google.com/go/kms/apiv1/kmspb cloud.google.com/go/kms/internal -# cloud.google.com/go/longrunning v0.5.12 -## explicit; go 1.20 +# cloud.google.com/go/longrunning v0.6.1 +## explicit; go 1.21 cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen cloud.google.com/go/longrunning/autogen/longrunningpb -# cloud.google.com/go/storage v1.43.0 -## explicit; go 1.20 +# cloud.google.com/go/monitoring v1.21.1 +## explicit; go 1.21 +cloud.google.com/go/monitoring/apiv3/v2 +cloud.google.com/go/monitoring/apiv3/v2/monitoringpb +cloud.google.com/go/monitoring/internal +# cloud.google.com/go/storage v1.46.0 +## explicit; go 1.21 cloud.google.com/go/storage +cloud.google.com/go/storage/experimental 
cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb @@ -70,14 +80,14 @@ filippo.io/edwards25519/field github.com/AlecAivazis/survey/v2 github.com/AlecAivazis/survey/v2/core github.com/AlecAivazis/survey/v2/terminal -# github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 -## explicit; go 1.17 -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper +# github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 +## explicit; go 1.16 +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider # github.com/Azure/azure-sdk-for-go v68.0.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -99,7 +109,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -170,7 +180,16 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -# github.com/IBM/sarama v1.43.0 +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 +## explicit; go 1.21 +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 +## explicit; go 1.21 +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 +## explicit; go 1.21 +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping +# github.com/IBM/sarama v1.43.3 ## explicit; go 1.19 github.com/IBM/sarama # github.com/Microsoft/go-winio v0.6.2 @@ -237,7 +256,7 @@ github.com/alibabacloud-go/tea-utils/service # github.com/alibabacloud-go/tea-xml v1.1.3 ## explicit; go 1.14 github.com/alibabacloud-go/tea-xml/service -# github.com/aliyun/credentials-go v1.3.1 +# github.com/aliyun/credentials-go v1.3.2 ## explicit; go 1.14 github.com/aliyun/credentials-go/credentials github.com/aliyun/credentials-go/credentials/request @@ -385,8 +404,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 +## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/kms 
github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints github.com/aws/aws-sdk-go-v2/service/kms/types @@ -460,9 +479,9 @@ github.com/blendle/zapdriver # github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 ## explicit; go 1.16 github.com/bradleyfalzon/ghinstallation/v2 -# github.com/cenkalti/backoff/v3 v3.2.2 -## explicit; go 1.12 -github.com/cenkalti/backoff/v3 +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 # github.com/census-instrumentation/opencensus-proto v0.4.1 ## explicit; go 1.18 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 @@ -496,6 +515,16 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 +# github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 +## explicit; go 1.19 +github.com/cncf/xds/go/udpa/annotations +github.com/cncf/xds/go/udpa/type/v1 +github.com/cncf/xds/go/xds/annotations/v3 +github.com/cncf/xds/go/xds/core/v3 +github.com/cncf/xds/go/xds/data/orca/v3 +github.com/cncf/xds/go/xds/service/orca/v3 +github.com/cncf/xds/go/xds/type/matcher/v3 +github.com/cncf/xds/go/xds/type/v3 # github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be ## explicit github.com/common-nighthawk/go-figure @@ -521,7 +550,7 @@ github.com/digitorus/timestamp # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/docker/cli v27.1.2+incompatible +# github.com/docker/cli v27.3.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -530,7 +559,7 @@ github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.3+incompatible ## explicit github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v27.1.2+incompatible +# github.com/docker/docker v27.3.1+incompatible ## explicit github.com/docker/docker/pkg/homedir # github.com/docker/docker-credential-helpers v0.8.0 @@ -540,7 +569,7 @@ github.com/docker/docker-credential-helpers/credentials # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize -# github.com/eapache/go-resiliency v1.6.0 +# github.com/eapache/go-resiliency v1.7.0 ## explicit; go 1.13 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 @@ -553,6 +582,49 @@ github.com/eapache/queue ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log +# github.com/envoyproxy/go-control-plane v0.13.0 +## explicit; go 1.21 +github.com/envoyproxy/go-control-plane/envoy/admin/v3 +github.com/envoyproxy/go-control-plane/envoy/annotations +github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3 +github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3 +github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3 +github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3 +github.com/envoyproxy/go-control-plane/envoy/config/core/v3 +github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3 +github.com/envoyproxy/go-control-plane/envoy/config/listener/v3 +github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3 +github.com/envoyproxy/go-control-plane/envoy/config/overload/v3 +github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3 +github.com/envoyproxy/go-control-plane/envoy/config/route/v3 +github.com/envoyproxy/go-control-plane/envoy/config/tap/v3 
+github.com/envoyproxy/go-control-plane/envoy/config/trace/v3 +github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3 +github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3 +github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3 +github.com/envoyproxy/go-control-plane/envoy/service/status/v3 +github.com/envoyproxy/go-control-plane/envoy/type/http/v3 +github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3 +github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3 +github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3 +github.com/envoyproxy/go-control-plane/envoy/type/v3 +# github.com/envoyproxy/protoc-gen-validate v1.1.0 +## explicit; go 1.19 +github.com/envoyproxy/protoc-gen-validate/validate # github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch @@ -566,9 +638,10 @@ github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/fvbommel/sortorder v1.1.0 ## explicit; go 1.13 github.com/fvbommel/sortorder @@ -636,7 +709,7 @@ github.com/go-jose/go-jose/v3/cipher github.com/go-jose/go-jose/v3/cryptosigner github.com/go-jose/go-jose/v3/json github.com/go-jose/go-jose/v3/jwt -# github.com/go-jose/go-jose/v4 v4.0.2 +# github.com/go-jose/go-jose/v4 v4.0.4 ## explicit; go 1.21 github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher @@ -869,7 +942,7 @@ github.com/google/uuid # github.com/google/wire v0.6.0 ## explicit; go 1.12 github.com/google/wire -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -880,6 +953,7 @@ github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal +github.com/googleapis/gax-go/v2/iterator # github.com/gorilla/websocket v1.5.3 ## explicit; go 1.12 github.com/gorilla/websocket @@ -937,13 
+1011,13 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/vault/api v1.14.0 +# github.com/hashicorp/vault/api v1.15.0 ## explicit; go 1.21 github.com/hashicorp/vault/api # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/in-toto/attestation v1.0.1 +# github.com/in-toto/attestation v1.1.0 ## explicit; go 1.20 github.com/in-toto/attestation/go/v1 # github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 @@ -1005,7 +1079,7 @@ github.com/jcmturner/rpc/v2/ndr # github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 ## explicit; go 1.20 github.com/jedisct1/go-minisign -# github.com/jellydator/ttlcache/v3 v3.2.0 +# github.com/jellydator/ttlcache/v3 v3.3.0 ## explicit; go 1.18 github.com/jellydator/ttlcache/v3 # github.com/jmespath/go-jmespath v0.4.0 @@ -1122,8 +1196,8 @@ github.com/monochromegane/go-gitignore # github.com/montanaflynn/stats v0.7.1 ## explicit; go 1.13 github.com/montanaflynn/stats -# github.com/mozillazg/docker-credential-acr-helper v0.3.0 -## explicit; go 1.17 +# github.com/mozillazg/docker-credential-acr-helper v0.4.0 +## explicit; go 1.18 github.com/mozillazg/docker-credential-acr-helper/pkg/acr github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper github.com/mozillazg/docker-credential-acr-helper/pkg/version @@ -1239,8 +1313,8 @@ github.com/openshift/client-go/user/clientset/versioned/typed/user/v1 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -# github.com/pelletier/go-toml/v2 v2.2.2 -## explicit; go 1.16 +# github.com/pelletier/go-toml/v2 v2.2.3 +## explicit; go 1.21.0 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger @@ -1262,6 +1336,15 @@ github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 +## explicit; go 1.20 +github.com/planetscale/vtprotobuf/protohelpers +github.com/planetscale/vtprotobuf/types/known/anypb +github.com/planetscale/vtprotobuf/types/known/durationpb +github.com/planetscale/vtprotobuf/types/known/emptypb +github.com/planetscale/vtprotobuf/types/known/structpb +github.com/planetscale/vtprotobuf/types/known/timestamppb +github.com/planetscale/vtprotobuf/types/known/wrapperspb # github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -1317,8 +1400,8 @@ github.com/segmentio/ksuid # github.com/shibumi/go-pathspec v1.3.0 ## explicit; go 1.17 github.com/shibumi/go-pathspec -# github.com/sigstore/cosign/v2 v2.4.0 -## explicit; go 1.22.5 +# github.com/sigstore/cosign/v2 v2.4.1 +## explicit; go 1.22.7 github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio github.com/sigstore/cosign/v2/cmd/cosign/cli/options github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy @@ -1352,8 +1435,8 @@ github.com/sigstore/cosign/v2/pkg/providers github.com/sigstore/cosign/v2/pkg/providers/filesystem github.com/sigstore/cosign/v2/pkg/signature github.com/sigstore/cosign/v2/pkg/types -# github.com/sigstore/fulcio v1.5.1 -## explicit; go 1.22.4 +# github.com/sigstore/fulcio v1.6.3 +## explicit; go 1.22.5 github.com/sigstore/fulcio/pkg/api # github.com/sigstore/protobuf-specs v0.3.2 ## explicit; go 1.18 @@ -1391,8 +1474,8 @@ 
github.com/sigstore/rekor/pkg/types/intoto/v0.0.2 github.com/sigstore/rekor/pkg/types/rekord github.com/sigstore/rekor/pkg/types/rekord/v0.0.1 github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/fulcioroots github.com/sigstore/sigstore/pkg/oauth @@ -1403,17 +1486,17 @@ github.com/sigstore/sigstore/pkg/signature/kms github.com/sigstore/sigstore/pkg/signature/options github.com/sigstore/sigstore/pkg/signature/payload github.com/sigstore/sigstore/pkg/tuf -# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/aws -# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/azure -# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/gcp -# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/hashivault # github.com/sigstore/timestamp-authority v1.2.2 ## explicit; go 1.21 @@ -1456,7 +1539,7 @@ github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml github.com/spf13/viper/internal/features -# github.com/spiffe/go-spiffe/v2 v2.3.0 +# github.com/spiffe/go-spiffe/v2 v2.4.0 ## explicit; go 1.21 github.com/spiffe/go-spiffe/v2/bundle/jwtbundle github.com/spiffe/go-spiffe/v2/bundle/spiffebundle @@ -1491,8 +1574,8 @@ github.com/syndtr/goleveldb/leveldb/opt github.com/syndtr/goleveldb/leveldb/storage github.com/syndtr/goleveldb/leveldb/table github.com/syndtr/goleveldb/leveldb/util -# github.com/tektoncd/chains v0.22.0 -## explicit; go 1.22 +# github.com/tektoncd/chains v0.23.0 +## explicit; go 1.22.8 github.com/tektoncd/chains/internal/backport github.com/tektoncd/chains/pkg/artifacts github.com/tektoncd/chains/pkg/chains @@ -1516,8 +1599,8 @@ github.com/tektoncd/chains/pkg/chains/storage/pubsub github.com/tektoncd/chains/pkg/chains/storage/tekton github.com/tektoncd/chains/pkg/config github.com/tektoncd/chains/pkg/patch -# github.com/tektoncd/cli v0.38.1 -## explicit; go 1.22.5 +# github.com/tektoncd/cli v0.39.0 +## explicit; go 1.22.8 github.com/tektoncd/cli/pkg/actions github.com/tektoncd/cli/pkg/bundle github.com/tektoncd/cli/pkg/chain @@ -1567,8 +1650,8 @@ github.com/tektoncd/cli/pkg/triggertemplate github.com/tektoncd/cli/pkg/trustedresources github.com/tektoncd/cli/pkg/version github.com/tektoncd/cli/pkg/workspaces -# github.com/tektoncd/hub v1.18.0 -## explicit; go 1.22 +# github.com/tektoncd/hub v1.19.0 +## explicit; go 1.22.0 github.com/tektoncd/hub/api/pkg/cli/app github.com/tektoncd/hub/api/pkg/cli/cmd github.com/tektoncd/hub/api/pkg/cli/cmd/check_upgrade @@ -1596,7 +1679,7 @@ github.com/tektoncd/hub/api/v1/gen/http/catalog/client github.com/tektoncd/hub/api/v1/gen/http/resource/client github.com/tektoncd/hub/api/v1/gen/resource 
github.com/tektoncd/hub/api/v1/gen/resource/views -# github.com/tektoncd/pipeline v0.65.0 +# github.com/tektoncd/pipeline v0.65.2 ## explicit; go 1.22 github.com/tektoncd/pipeline/internal/artifactref github.com/tektoncd/pipeline/pkg/apis/config @@ -1654,7 +1737,7 @@ github.com/tektoncd/results/pkg/cli/portforward github.com/tektoncd/results/proto/pipeline/v1/pipeline_go_proto github.com/tektoncd/results/proto/v1alpha2/results_go_proto github.com/tektoncd/results/proto/v1alpha3/results_go_proto -# github.com/tektoncd/triggers v0.29.1 +# github.com/tektoncd/triggers v0.30.0 ## explicit; go 1.22 github.com/tektoncd/triggers/pkg/apis/config github.com/tektoncd/triggers/pkg/apis/triggers @@ -1701,7 +1784,7 @@ github.com/vbatts/tar-split/archive/tar # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# github.com/xanzy/go-gitlab v0.108.0 +# github.com/xanzy/go-gitlab v0.109.0 ## explicit; go 1.19 github.com/xanzy/go-gitlab # github.com/xdg-go/pbkdf2 v1.0.0 @@ -1719,13 +1802,13 @@ github.com/xlab/treeprint # github.com/xlzd/gotp v0.1.0 ## explicit; go 1.17 github.com/xlzd/gotp -# github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 -## explicit; go 1.22 +# github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 +## explicit; go 1.17 github.com/youmark/pkcs8 # github.com/zeebo/errs v1.3.0 ## explicit; go 1.12 github.com/zeebo/errs -# go.mongodb.org/mongo-driver v1.15.0 +# go.mongodb.org/mongo-driver v1.16.1 ## explicit; go 1.18 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -1794,16 +1877,20 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 +# go.opentelemetry.io/contrib/detectors/gcp v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/contrib/detectors/gcp +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 +# go.opentelemetry.io/otel v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1819,12 +1906,27 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 -# go.opentelemetry.io/otel/metric v1.28.0 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/trace v1.28.0 +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal/x +go.opentelemetry.io/otel/sdk/resource +# 
go.opentelemetry.io/otel/sdk/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/internal +go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/exemplar +go.opentelemetry.io/otel/sdk/metric/internal/x +go.opentelemetry.io/otel/sdk/metric/metricdata +# go.opentelemetry.io/otel/trace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1836,8 +1938,8 @@ go.starlark.net/resolve go.starlark.net/starlark go.starlark.net/starlarkstruct go.starlark.net/syntax -# go.step.sm/crypto v0.51.1 -## explicit; go 1.21 +# go.step.sm/crypto v0.51.2 +## explicit; go 1.22 go.step.sm/crypto/fingerprint go.step.sm/crypto/internal/bcrypt_pbkdf go.step.sm/crypto/internal/emoji @@ -1862,8 +1964,8 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# goa.design/goa/v3 v3.18.2 -## explicit; go 1.21.0 +# goa.design/goa/v3 v3.19.1 +## explicit; go 1.22.0 goa.design/goa/v3/http goa.design/goa/v3/pkg # gocloud.dev v0.40.0 @@ -1889,13 +1991,13 @@ gocloud.dev/pubsub gocloud.dev/pubsub/batcher gocloud.dev/pubsub/driver gocloud.dev/pubsub/mempubsub -# gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 +# gocloud.dev/docstore/mongodocstore v0.40.0 ## explicit; go 1.22 gocloud.dev/docstore/mongodocstore -# gocloud.dev/pubsub/kafkapubsub v0.37.0 -## explicit; go 1.20 +# gocloud.dev/pubsub/kafkapubsub v0.40.0 +## explicit; go 1.21.0 gocloud.dev/pubsub/kafkapubsub -# golang.org/x/crypto v0.26.0 +# golang.org/x/crypto v0.28.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b @@ -1929,7 +2031,7 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 +# golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1937,10 +2039,10 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.20.0 -## explicit; go 1.18 +# golang.org/x/mod v0.21.0 +## explicit; go 1.22.0 golang.org/x/mod/sumdb/note -# golang.org/x/net v0.28.0 +# golang.org/x/net v0.30.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -1955,7 +2057,7 @@ golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1974,17 +2076,17 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.25.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.23.0 +# golang.org/x/term v0.26.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.17.0 +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding @@ -2003,7 +2105,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/xerrors 
v0.0.0-20240716161551-93cc26a95ae9 @@ -2013,7 +2115,7 @@ golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.195.0 +# google.golang.org/api v0.203.0 ## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -2032,36 +2134,51 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c +# google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/cloud/location +google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/googleapis/type/latlng google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 +# google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +google.golang.org/genproto/googleapis/api/label +google.golang.org/genproto/googleapis/api/metric +google.golang.org/genproto/googleapis/api/monitoredres +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.67.0 +# google.golang.org/grpc v1.67.1 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes +google.golang.org/grpc/authz/audit +google.golang.org/grpc/authz/audit/stdout google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/leastrequest google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/rls +google.golang.org/grpc/balancer/rls/internal/adaptive +google.golang.org/grpc/balancer/rls/internal/keys google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/balancer/weightedroundrobin +google.golang.org/grpc/balancer/weightedroundrobin/internal +google.golang.org/grpc/balancer/weightedtarget +google.golang.org/grpc/balancer/weightedtarget/weightedaggregator google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz google.golang.org/grpc/codes @@ -2077,28 +2194,38 @@ google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp google.golang.org/grpc/credentials/google google.golang.org/grpc/credentials/insecure google.golang.org/grpc/credentials/oauth +google.golang.org/grpc/credentials/tls/certprovider +google.golang.org/grpc/credentials/tls/certprovider/pemfile google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog google.golang.org/grpc/grpclog/internal 
google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal +google.golang.org/grpc/internal/admin google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancer/nop +google.golang.org/grpc/internal/balancergroup google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/cache google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/credentials/xds google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/hierarchy google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/proto/grpc_lookup_v1 google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/dns/internal @@ -2110,10 +2237,17 @@ google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/internal/wrr google.golang.org/grpc/internal/xds +google.golang.org/grpc/internal/xds/bootstrap +google.golang.org/grpc/internal/xds/bootstrap/tlscreds +google.golang.org/grpc/internal/xds/matcher +google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive google.golang.org/grpc/mem google.golang.org/grpc/metadata +google.golang.org/grpc/orca +google.golang.org/grpc/orca/internal google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns @@ -2122,8 +2256,45 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +google.golang.org/grpc/xds +google.golang.org/grpc/xds/bootstrap +google.golang.org/grpc/xds/csds +google.golang.org/grpc/xds/googledirectpath +google.golang.org/grpc/xds/internal +google.golang.org/grpc/xds/internal/balancer +google.golang.org/grpc/xds/internal/balancer/cdsbalancer +google.golang.org/grpc/xds/internal/balancer/clusterimpl +google.golang.org/grpc/xds/internal/balancer/clustermanager +google.golang.org/grpc/xds/internal/balancer/clusterresolver +google.golang.org/grpc/xds/internal/balancer/loadstore +google.golang.org/grpc/xds/internal/balancer/outlierdetection +google.golang.org/grpc/xds/internal/balancer/priority +google.golang.org/grpc/xds/internal/balancer/ringhash +google.golang.org/grpc/xds/internal/balancer/wrrlocality +google.golang.org/grpc/xds/internal/clusterspecifier +google.golang.org/grpc/xds/internal/clusterspecifier/rls +google.golang.org/grpc/xds/internal/httpfilter +google.golang.org/grpc/xds/internal/httpfilter/fault +google.golang.org/grpc/xds/internal/httpfilter/rbac +google.golang.org/grpc/xds/internal/httpfilter/router +google.golang.org/grpc/xds/internal/resolver +google.golang.org/grpc/xds/internal/resolver/internal +google.golang.org/grpc/xds/internal/server +google.golang.org/grpc/xds/internal/xdsclient +google.golang.org/grpc/xds/internal/xdsclient/internal +google.golang.org/grpc/xds/internal/xdsclient/load +google.golang.org/grpc/xds/internal/xdsclient/transport 
+google.golang.org/grpc/xds/internal/xdsclient/transport/internal +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter +google.golang.org/grpc/xds/internal/xdsclient/xdsresource +google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version +# google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a +## explicit; go 1.21 +google.golang.org/grpc/stats/opentelemetry +google.golang.org/grpc/stats/opentelemetry/internal +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -2178,7 +2349,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.31.0 +# k8s.io/api v0.31.3 ## explicit; go 1.22.0 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 @@ -2239,7 +2410,7 @@ k8s.io/api/storagemigration/v1alpha1 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.31.0 +# k8s.io/apimachinery v0.31.3 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -2301,7 +2472,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/cli-runtime v0.29.8 +# k8s.io/cli-runtime v0.29.11 ## explicit; go 1.21 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions